var/home/core/zuul-output/logs/kubelet.log
Nov 28 16:10:16 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 16:10:16 crc restorecon[4760]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:16 crc restorecon[4760]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc 
restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 16:10:16 crc 
restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:16 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:16 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 
16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:17 crc 
restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:17 crc restorecon[4760]: 
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 16:10:17 crc restorecon[4760]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 28 16:10:17 crc kubenswrapper[4909]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.723565    4909 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726484    4909 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726506    4909 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726512    4909 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726517    4909 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726522    4909 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726528    4909 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726534    4909 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726539    4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726543    4909 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726548    4909 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726554    4909 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726560 4909 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726565 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726570 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726580 4909 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726585 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726591 4909 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726595 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726600 4909 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726605 4909 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726611 4909 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726616 4909 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726621 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726625 4909 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726629 4909 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726634 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726638 4909 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726642 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726647 4909 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726670 4909 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726676 4909 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726681 4909 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726687 4909 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726692 4909 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726697 4909 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726702 4909 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726706 4909 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726711 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726715 4909 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726720 4909 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726725 4909 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726729 4909 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726736 4909 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726742 4909 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726749 4909 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726755 4909 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726760 4909 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726765 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726770 4909 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726776 4909 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726781 4909 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726787 4909 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726791 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726796 4909 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726801 4909 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726806 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726810 4909 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726816 4909 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726820 4909 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726825 4909 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726830 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726834 4909 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726839 4909 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726843 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726848 4909 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726852 4909 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726856 4909 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726862 4909 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726866 4909 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726874 4909 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.726880 4909 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.726973 4909 flags.go:64] FLAG: --address="0.0.0.0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.726983 4909 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.726993 4909 flags.go:64] FLAG: --anonymous-auth="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727000 4909 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727006 4909 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727012 4909 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727018 4909 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727025 4909 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727030 4909 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727036 4909 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727042 4909 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727048 4909 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727053 4909 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727058 4909 flags.go:64] FLAG: --cgroup-root=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727063 4909 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727069 4909 flags.go:64] FLAG: --client-ca-file=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727074 4909 flags.go:64] FLAG: --cloud-config=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727080 4909 flags.go:64] FLAG: --cloud-provider=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727086 4909 flags.go:64] FLAG: --cluster-dns="[]"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727094 4909 flags.go:64] FLAG: --cluster-domain=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727099 4909 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727104 4909 flags.go:64] FLAG: --config-dir=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727109 4909 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727115 4909 flags.go:64] FLAG: --container-log-max-files="5"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727122 4909 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727127 4909 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727132 4909 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727138 4909 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727143 4909 flags.go:64] FLAG: --contention-profiling="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727148 4909 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727153 4909 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727159 4909 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727164 4909 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727172 4909 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727177 4909 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727183 4909 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727188 4909 flags.go:64] FLAG: --enable-load-reader="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727193 4909 flags.go:64] FLAG: --enable-server="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727199 4909 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727205 4909 flags.go:64] FLAG: --event-burst="100"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727210 4909 flags.go:64] FLAG: --event-qps="50"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727215 4909 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727221 4909 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727226 4909 flags.go:64] FLAG: --eviction-hard=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727232 4909 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727237 4909 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727242 4909 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727248 4909 flags.go:64] FLAG: --eviction-soft=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727253 4909 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727259 4909 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727265 4909 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727270 4909 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727275 4909 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727280 4909 flags.go:64] FLAG: --fail-swap-on="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727285 4909 flags.go:64] FLAG: --feature-gates=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727291 4909 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727296 4909 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727302 4909 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727307 4909 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727312 4909 flags.go:64] FLAG: --healthz-port="10248"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727318 4909 flags.go:64] FLAG: --help="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727323 4909 flags.go:64] FLAG: --hostname-override=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727328 4909 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727333 4909 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727338 4909 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727343 4909 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727348 4909 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727353 4909 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727358 4909 flags.go:64] FLAG: --image-service-endpoint=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727364 4909 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727369 4909 flags.go:64] FLAG: --kube-api-burst="100"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727374 4909 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727380 4909 flags.go:64] FLAG: --kube-api-qps="50"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727385 4909 flags.go:64] FLAG: --kube-reserved=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727390 4909 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727395 4909 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727400 4909 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727405 4909 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727411 4909 flags.go:64] FLAG: --lock-file=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727417 4909 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727423 4909 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727428 4909 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727437 4909 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727442 4909 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727447 4909 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727452 4909 flags.go:64] FLAG: --logging-format="text"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727457 4909 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727463 4909 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727468 4909 flags.go:64] FLAG: --manifest-url=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727474 4909 flags.go:64] FLAG: --manifest-url-header=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727480 4909 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727485 4909 flags.go:64] FLAG: --max-open-files="1000000"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727491 4909 flags.go:64] FLAG: --max-pods="110"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727496 4909 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727501 4909 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727506 4909 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727511 4909 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727516 4909 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727521 4909 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727526 4909 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727537 4909 flags.go:64] FLAG: --node-status-max-images="50"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727542 4909 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727547 4909 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727552 4909 flags.go:64] FLAG: --pod-cidr=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727557 4909 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727565 4909 flags.go:64] FLAG: --pod-manifest-path=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727570 4909 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727575 4909 flags.go:64] FLAG: --pods-per-core="0"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727581 4909 flags.go:64] FLAG: --port="10250"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727586 4909 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727591 4909 flags.go:64] FLAG: --provider-id=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727596 4909 flags.go:64] FLAG: --qos-reserved=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727601 4909 flags.go:64] FLAG: --read-only-port="10255"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727607 4909 flags.go:64] FLAG: --register-node="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727612 4909 flags.go:64] FLAG: --register-schedulable="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727617 4909 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727626 4909 flags.go:64] FLAG: --registry-burst="10"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727631 4909 flags.go:64] FLAG: --registry-qps="5"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727637 4909 flags.go:64] FLAG: --reserved-cpus=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727642 4909 flags.go:64] FLAG: --reserved-memory=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727649 4909 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727671 4909 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727677 4909 flags.go:64] FLAG: --rotate-certificates="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727683 4909 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727688 4909 flags.go:64] FLAG: --runonce="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727693 4909 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727699 4909 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727705 4909 flags.go:64] FLAG: --seccomp-default="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727710 4909 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727715 4909 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727721 4909 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727726 4909 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727731 4909 flags.go:64] FLAG: --storage-driver-password="root"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727736 4909 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727742 4909 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727747 4909 flags.go:64] FLAG: --storage-driver-user="root"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727753 4909 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727758 4909 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727763 4909 flags.go:64] FLAG: --system-cgroups=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727769 4909 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727776 4909 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727783 4909 flags.go:64] FLAG: --tls-cert-file=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727788 4909 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727795 4909 flags.go:64] FLAG: --tls-min-version=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727800 4909 flags.go:64] FLAG: --tls-private-key-file=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727810 4909 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727815 4909 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727821 4909 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727827 4909 flags.go:64] FLAG: --v="2"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727833 4909 flags.go:64] FLAG: --version="false"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727840 4909 flags.go:64] FLAG: --vmodule=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727846 4909 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.727851 4909 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.727971 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.727977 4909 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.727984 4909 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.727990 4909 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.727996 4909 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728002 4909 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728007 4909 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728012 4909 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728017 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728021 4909 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728026 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728031 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728037 4909 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728043 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728048 4909 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728054 4909 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728059 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728065 4909 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728069 4909 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728074 4909 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728079 4909 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728085 4909 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728089 4909 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728095 4909 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728104 4909 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728109 4909 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728115 4909 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728120 4909 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728125 4909 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728129 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728134 4909 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728138 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728142 4909 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728147 4909 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728151 4909 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728155 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728160 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728164 4909 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728169 4909 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728173 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728178 4909 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728390 4909 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728399 4909 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728405 4909 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728410 4909 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728414 4909 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728418 4909 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728423 4909 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728427 4909 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728432 4909 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728437 4909 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728442 4909 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728447 4909 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728453 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728458 4909 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728462 4909 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728473 4909 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728477 4909 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728482 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728487 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728493 4909 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728499 4909 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728505 4909 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728510 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728515 4909 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728520 4909 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728526 4909 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728532 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728537 4909 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728542 4909 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.728547 4909 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.728746 4909 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.739619 4909 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.739701 4909 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739833 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739847 4909 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739858 4909 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739869 4909 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739878 4909 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739887 4909 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739899 4909 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739910 4909 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739920 4909 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739931 4909 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739942 4909 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739951 4909 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739959 4909 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739967 4909 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739975 4909 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739984 4909 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739991 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.739999 4909 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740006 4909 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740015 4909 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740022 4909 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740031 4909 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740039 4909 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740046 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740054 4909 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740062 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740069 4909 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740099 4909 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740107 4909 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740118 4909 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740128 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740136 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740145 4909 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740155 4909 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740166 4909 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740174 4909 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740183 4909 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740192 4909 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740200 4909 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740210 4909 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740219 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740227 4909 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740235 4909 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740243 4909 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740251 4909 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740259 4909 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740266 4909 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740274 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740281 4909 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740289 4909 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740297 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740305 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740312 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740320 4909 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740327 4909 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740336 4909 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740344 4909 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740352 4909 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740359 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740368 4909 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740376 4909 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740384 4909 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740391 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740399 4909 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740409 4909 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740417 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740425 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740433 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740440 4909 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740447 4909 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740455 4909 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.740468 4909 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740779 4909 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740797 4909 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740809 4909 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740820 4909 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740830 4909 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740838 4909 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740846 4909 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740854 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740863 4909 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740871 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740878 4909 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740889 4909 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740899 4909 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740908 4909 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740918 4909 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740926 4909 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740934 4909 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740943 4909 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740951 4909 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740960 4909 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740969 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740976 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740984 4909 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.740992 4909 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741000 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741008 4909 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741042 4909 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741053 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741062 4909 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741071 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741080 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741088 4909 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741097 4909 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741106 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741115 4909 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741125 4909 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741250 4909 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741260 4909 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741268 4909 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741276 4909 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741284 4909 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741292 4909 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741300 4909 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741308 4909 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741315 4909 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741326 4909 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741334 4909 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741343 4909 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741350 4909 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741358 4909 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741366 4909 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741374 4909 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741383 4909 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741392 4909 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741400 4909 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741408 4909 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741415 4909 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741423 4909 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741431 4909 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741439 4909 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741446 4909 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741454 4909 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741462 4909 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741470 4909 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741477 4909 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741485 4909 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741492 4909 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741500 4909 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741507 4909 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741515 4909 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.741522 4909 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.741534 4909 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.742124 4909 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.746343 4909 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.746473 4909 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.747443 4909 server.go:997] "Starting client certificate rotation"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.747473 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.747886 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-10 09:41:44.633119653 +0000 UTC
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.747992 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.753981 4909 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.755775 4909 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.756851 4909 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.768368 4909 log.go:25] "Validated CRI v1 runtime API"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.784900 4909 log.go:25] "Validated CRI v1 image API"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.786961 4909 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.790021 4909 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-16-05-30-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.790062 4909 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.812279 4909 manager.go:217] Machine: {Timestamp:2025-11-28 16:10:17.810780918 +0000 UTC m=+0.207465482 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:1e8d38e9-395c-4d37-b567-3bfe4869e3f7 BootID:b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:b7:71:b8 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:b7:71:b8 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:1f:ae:04 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:e9:19:ff Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e6:11:5d Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f3:cc:17 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:a2:49:51 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:5a:ac:05:ff:98:b0 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:a6:83:66:87:96:e6 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.812873 4909 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.813099 4909 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.813887 4909 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.814180 4909 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.814283 4909 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.814722 4909 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.814797 4909 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.815076 4909 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.815205 4909 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.815449 4909 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.815600 4909 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.816977 4909 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.817105 4909 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.817227 4909 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.817329 4909 kubelet.go:324] "Adding apiserver pod source"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.817435 4909 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.818960 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.819086 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.818945 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.819169 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.820132 4909 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.820451 4909 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821262 4909 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821749 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821769 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821777 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821784 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821795 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821802 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821809 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821820 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821827 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821834 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821845 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.821851 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.822040 4909 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.822501 4909 server.go:1280] "Started kubelet"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.822810 4909 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.822806 4909 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 16:10:17 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.824224 4909 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.824610 4909 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.825773 4909 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.832790 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c3789861c8f7f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:10:17.822457727 +0000 UTC m=+0.219142251,LastTimestamp:2025-11-28 16:10:17.822457727 +0000 UTC m=+0.219142251,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.834144 4909 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.834199 4909 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.834370 4909 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 08:00:07.637116619 +0000 UTC
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.834407 4909 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 615h49m49.802711913s for next certificate rotation
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.835137 4909 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.835175 4909 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.835328 4909 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.836346 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.836473 4909 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.836506 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.836556 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="200ms"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.837558 4909 factory.go:55] Registering systemd factory
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.837589 4909 factory.go:221] Registration of the systemd container factory successfully
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.838511 4909 factory.go:153] Registering CRI-O factory
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.838531 4909 factory.go:221] Registration of the crio container factory successfully
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.838587 4909 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.838608 4909 factory.go:103] Registering Raw factory
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.838623 4909 manager.go:1196] Started watching for new ooms in manager
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.841065 4909 manager.go:319] Starting recovery of all containers
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854638 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854807 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854843 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854864 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854898 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854929 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854946 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.854965 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855002 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855045 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855070 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855114 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855141 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855180 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855211 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855275 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855296 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855319 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855341 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855398 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855422 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855444 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855468 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855492 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855514 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855538 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855563 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855588 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855612 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855635 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855715 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855749 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855774 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855797 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855821 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855845 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855868 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855896 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855920 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855946 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855969 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.855994 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.856017 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.856042 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.856069 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857044 4909 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857093 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857138 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857165 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857191 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857228 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857263 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857300 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857359 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857389 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857415 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857442 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857471 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857495 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857533 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857558 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857582 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857608 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857639 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857715 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857753 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857780 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857803 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857839 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857865 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857890 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857926 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857950 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.857988 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858200 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858396 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858425 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858469 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858497 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858534 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858572 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.858600 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864274 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864377 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864419 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864448 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864465 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864491 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864506 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864521 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864717 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864735 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864756 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864795 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864811 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864831 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864845 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864869 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.864996 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865035 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865054 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865073 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865087 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865126 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865197 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865293 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865347 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865365 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865386 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865405 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865436 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865455 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865473 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865490 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865526 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865560 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865602 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865616 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865864 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865885 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865897 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865914 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865927 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865938 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865954 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865969 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865985 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.865997 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866009 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866026 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866037 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866070 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866082 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866094 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866110 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866136 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866152 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866165 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866178 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c"
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866198 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866213 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866224 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866257 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866269 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866285 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866298 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866313 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866328 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866342 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866360 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866372 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866384 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866400 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866412 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866429 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866441 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866453 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866468 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866480 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866498 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866511 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866523 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866539 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866551 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866568 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866581 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866594 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866609 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866622 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866633 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866665 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866678 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866693 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866705 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866718 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866732 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866745 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866760 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866771 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866783 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.866897 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.867516 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868348 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868437 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868463 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868483 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868503 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868523 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868545 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868565 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868586 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868606 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868623 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868644 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868726 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868746 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868767 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868788 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868806 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868835 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868856 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868875 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868896 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868916 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868934 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" 
volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868954 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868974 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.868994 4909 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.869012 4909 reconstruct.go:97] "Volume reconstruction finished" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.869027 4909 reconciler.go:26] "Reconciler: start to sync state" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.875868 4909 manager.go:324] Recovery completed Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.883944 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.886691 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.886723 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.886731 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.889368 4909 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.889387 4909 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.889407 4909 state_mem.go:36] "Initialized new in-memory state store" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.897709 4909 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.899120 4909 policy_none.go:49] "None policy: Start" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.899959 4909 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.899980 4909 state_mem.go:35] "Initializing new in-memory state store" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.900155 4909 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.900203 4909 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.900279 4909 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.900355 4909 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 16:10:17 crc kubenswrapper[4909]: W1128 16:10:17.901643 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.901826 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.936544 4909 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.972535 4909 manager.go:334] "Starting Device Plugin manager" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.972628 4909 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.972680 4909 server.go:79] "Starting device plugin registration server" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.973498 4909 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.973525 4909 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.973878 4909 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.974051 4909 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 16:10:17 crc kubenswrapper[4909]: I1128 16:10:17.974067 4909 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 16:10:17 crc kubenswrapper[4909]: E1128 16:10:17.984597 4909 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.001456 4909 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.001553 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.002667 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.002709 4909 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.002744 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.002937 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.003113 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.003184 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004051 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004095 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004111 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004342 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004522 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004572 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004904 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004945 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.004957 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005464 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005528 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005583 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005607 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005682 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.005753 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 
crc kubenswrapper[4909]: I1128 16:10:18.005952 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.006003 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.006733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.006760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.006771 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.006921 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.007015 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.007044 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.007062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.007080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.007104 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.008906 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.008929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.008932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.008974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.009003 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.008949 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.009254 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.009293 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.010260 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.010284 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.010304 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.037322 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="400ms" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071321 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071398 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071450 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071493 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071540 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071620 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071717 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" 
(UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071763 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071817 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071862 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071899 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.071985 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.072026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.072066 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.073739 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.075176 4909 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.075262 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.075289 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.075337 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.076093 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173831 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173883 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173925 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173945 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173962 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173977 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.173992 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174009 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174035 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174058 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174078 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174093 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174108 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174100 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174127 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174213 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174305 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174336 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174367 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174375 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174372 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174396 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174336 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174349 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 
16:10:18.174454 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174435 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.174472 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.276421 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.278959 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.278996 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.279004 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.279025 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.279326 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.339203 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.361414 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.372167 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-8b7dd0d755528e3eebdf0801f0364276a9080a33b8174bd679a8722e4810ead5 WatchSource:0}: Error finding container 8b7dd0d755528e3eebdf0801f0364276a9080a33b8174bd679a8722e4810ead5: Status 404 returned error can't find the container with id 8b7dd0d755528e3eebdf0801f0364276a9080a33b8174bd679a8722e4810ead5 Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.387036 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.387063 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b3a95d4d7cfb3413275c65ad53d4de0eeee2e0a1209638a28330a004a79c96c9 WatchSource:0}: Error finding container b3a95d4d7cfb3413275c65ad53d4de0eeee2e0a1209638a28330a004a79c96c9: Status 404 returned error can't find the container with id b3a95d4d7cfb3413275c65ad53d4de0eeee2e0a1209638a28330a004a79c96c9 Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.400493 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-f9013c017bc6e78f9a79fc35cff21b1b74bf556eaab34d216502e6e1c2fc74e5 WatchSource:0}: Error finding container f9013c017bc6e78f9a79fc35cff21b1b74bf556eaab34d216502e6e1c2fc74e5: Status 404 returned error can't find the container with id f9013c017bc6e78f9a79fc35cff21b1b74bf556eaab34d216502e6e1c2fc74e5 Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.415120 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.426298 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.432321 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-047d12644e99e3913b2a368fc496d2f7fc38eacfdf775770c57ea3e8f4c313bb WatchSource:0}: Error finding container 047d12644e99e3913b2a368fc496d2f7fc38eacfdf775770c57ea3e8f4c313bb: Status 404 returned error can't find the container with id 047d12644e99e3913b2a368fc496d2f7fc38eacfdf775770c57ea3e8f4c313bb Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.438402 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="800ms" Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.442909 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-b10de18739655bd28fe648e9ee2cf53e86d74b23c920d96c8415368bb1e617c7 WatchSource:0}: Error finding container b10de18739655bd28fe648e9ee2cf53e86d74b23c920d96c8415368bb1e617c7: Status 404 returned error can't find the container with id b10de18739655bd28fe648e9ee2cf53e86d74b23c920d96c8415368bb1e617c7 Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.679424 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.680871 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.680913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.680922 4909 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.680949 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.681334 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Nov 28 16:10:18 crc kubenswrapper[4909]: W1128 16:10:18.723900 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:18 crc kubenswrapper[4909]: E1128 16:10:18.723988 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.827623 4909 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.905730 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b10de18739655bd28fe648e9ee2cf53e86d74b23c920d96c8415368bb1e617c7"} Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.907006 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"047d12644e99e3913b2a368fc496d2f7fc38eacfdf775770c57ea3e8f4c313bb"} Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.908286 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f9013c017bc6e78f9a79fc35cff21b1b74bf556eaab34d216502e6e1c2fc74e5"} Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.909357 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b3a95d4d7cfb3413275c65ad53d4de0eeee2e0a1209638a28330a004a79c96c9"} Nov 28 16:10:18 crc kubenswrapper[4909]: I1128 16:10:18.910518 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8b7dd0d755528e3eebdf0801f0364276a9080a33b8174bd679a8722e4810ead5"} Nov 28 16:10:19 crc kubenswrapper[4909]: W1128 16:10:19.003939 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.004427 4909 
reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:19 crc kubenswrapper[4909]: W1128 16:10:19.097268 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.097382 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.219174 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c3789861c8f7f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:10:17.822457727 +0000 UTC m=+0.219142251,LastTimestamp:2025-11-28 16:10:17.822457727 +0000 UTC m=+0.219142251,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.239316 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="1.6s" Nov 28 16:10:19 crc kubenswrapper[4909]: W1128 16:10:19.347447 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.347520 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.481597 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.483328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.483371 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.483383 
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.483412 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.483868 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.826329 4909 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.867670 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 28 16:10:19 crc kubenswrapper[4909]: E1128 16:10:19.868643 4909 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.915441 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.915476 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.915487 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.915496 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.915541 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.916555 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.916596 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.916614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.917736 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef" exitCode=0
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.917839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.917881 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919149 4909 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="87a0a3278c97e0b62095a0c356dc69625281a2162a4304008e43d5b4b797db32" exitCode=0
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919195 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"87a0a3278c97e0b62095a0c356dc69625281a2162a4304008e43d5b4b797db32"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919288 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919615 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919704 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.919734 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.920361 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.920426 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.920449 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.921052 4909 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="21c2d58c3f013ade0f092f36eefd505c6e49d502372094b569982b9f1b273887" exitCode=0
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.921099 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"21c2d58c3f013ade0f092f36eefd505c6e49d502372094b569982b9f1b273887"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.921155 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.921573 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922310 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922357 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922375 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922752 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.922760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.923734 4909 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d" exitCode=0
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.923789 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d"}
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.923867 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.925217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.925236 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:19 crc kubenswrapper[4909]: I1128 16:10:19.925247 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.935466 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.935959 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"56b9768e934f5eba0145cbfba9668e6215d26784cf74c2c687147f910dc5f398"}
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.937896 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.937938 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.937950 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.940315 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6"}
Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.940350 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f"}
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.940361 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.940448 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.941149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.941180 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.941193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.943011 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.943054 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.943071 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.943087 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.944786 4909 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0a7656d77c9c3f1192ff7be4001b660acb744ce578f478091415c30149a0c0ab" exitCode=0 Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.944835 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0a7656d77c9c3f1192ff7be4001b660acb744ce578f478091415c30149a0c0ab"} Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.944886 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.944903 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945852 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945878 4909 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945900 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945923 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:20 crc kubenswrapper[4909]: I1128 16:10:20.945934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.084352 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.087329 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.087356 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.087368 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.087392 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.522586 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.529171 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.628958 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.951965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5"} Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.952110 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.953387 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.953440 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.953457 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.955905 4909 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c2a406b502d5a00682c557211732680e6d776128c3f8a57486fb07f999ffc020" exitCode=0 Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.955972 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c2a406b502d5a00682c557211732680e6d776128c3f8a57486fb07f999ffc020"} Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.956048 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.956078 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.956099 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.956107 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.956144 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961358 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961383 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961385 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961515 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961563 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961592 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961315 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961818 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:21 crc kubenswrapper[4909]: I1128 16:10:21.961839 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964274 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"cbfc56c8616e718aa84cff1a27953bf53545d58f4c85f134c3442cba7fd9b4a6"} Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964331 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:10:22 crc 
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964357 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6a1d925d0d6217f6ec938e3762a8a5c9e2707133f94803b3c4903067158edaf1"}
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964376 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964447 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964391 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.964388 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2b21c0cfaf35d1a92957e4f56644186338f63d9a5f9af5c50bf1a949ac65623e"}
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.965883 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.965932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.965949 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.966694 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.966761 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:22 crc kubenswrapper[4909]: I1128 16:10:22.966783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.985863 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5745b6c3e0c8cd0022da7ac358e06ec452fe8f0263750213acbcb10513581517"}
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.985924 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"16c92b6b79b474333b7703229e7f443cd1c5aa67a9b1ea28dd4eea4377c11486"}
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.986046 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.987464 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.987538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:23 crc kubenswrapper[4909]: I1128 16:10:23.987555 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.040716 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.629754 4909 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.629843 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.988507 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.989970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.990037 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:24 crc kubenswrapper[4909]: I1128 16:10:24.990059 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.406160 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.406374 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.406435 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.408360 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.408424 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:25 crc kubenswrapper[4909]: I1128 16:10:25.408444 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.395709 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.395919 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.395979 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.397621 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.397683 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:26 crc kubenswrapper[4909]: I1128 16:10:26.397697 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.153022 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.153256 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.155428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.155499 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.155519 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.182757 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.182993 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.184957 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.185015 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.185044 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.317614 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.317862 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.317918 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.319198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.319267 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.319286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.374317 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.374533 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.376028 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.376183 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.376293 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.569351 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:10:27 crc kubenswrapper[4909]: E1128 16:10:27.984937 4909 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 28 16:10:27 crc kubenswrapper[4909]: I1128 16:10:27.997599 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:28 crc kubenswrapper[4909]: I1128 16:10:28.000022 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:28 crc kubenswrapper[4909]: I1128 16:10:28.000089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:28 crc kubenswrapper[4909]: I1128 16:10:28.000111 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:30 crc kubenswrapper[4909]: W1128 16:10:30.766330 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 16:10:30 crc kubenswrapper[4909]: I1128 16:10:30.766512 4909 trace.go:236] Trace[1145912160]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:20.764) (total time: 10001ms):
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[1145912160]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:10:30.766)
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[1145912160]: [10.001750682s] [10.001750682s] END
Nov 28 16:10:30 crc kubenswrapper[4909]: E1128 16:10:30.766556 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 16:10:30 crc kubenswrapper[4909]: I1128 16:10:30.826823 4909 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 28 16:10:30 crc kubenswrapper[4909]: E1128 16:10:30.840154 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s"
Nov 28 16:10:30 crc kubenswrapper[4909]: W1128 16:10:30.907347 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 16:10:30 crc kubenswrapper[4909]: I1128 16:10:30.907482 4909 trace.go:236] Trace[312986653]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:20.905) (total time: 10001ms):
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[312986653]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:10:30.907)
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[312986653]: [10.001879067s] [10.001879067s] END
Nov 28 16:10:30 crc kubenswrapper[4909]: E1128 16:10:30.907536 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 16:10:30 crc kubenswrapper[4909]: W1128 16:10:30.927140 4909 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 16:10:30 crc kubenswrapper[4909]: I1128 16:10:30.927262 4909 trace.go:236] Trace[237800601]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:20.925) (total time: 10001ms):
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[237800601]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:10:30.927)
Nov 28 16:10:30 crc kubenswrapper[4909]: Trace[237800601]: [10.001324082s] [10.001324082s] END
Nov 28 16:10:30 crc kubenswrapper[4909]: E1128 16:10:30.927301 4909 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 16:10:31 crc kubenswrapper[4909]: E1128 16:10:31.088605 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc"
Nov 28 16:10:31 crc kubenswrapper[4909]: I1128 16:10:31.535259 4909 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 16:10:31 crc kubenswrapper[4909]: I1128 16:10:31.535329 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 16:10:31 crc kubenswrapper[4909]: I1128 16:10:31.540044 4909 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 16:10:31 crc kubenswrapper[4909]: I1128 16:10:31.540154 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.910335 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.910778 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.912918 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.913049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.913076 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:33 crc kubenswrapper[4909]: I1128 16:10:33.950522 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.017133 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.018890 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.018964 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.018980 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.036718 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.289268 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.290627 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.290757 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.290789 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.290834 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 16:10:34 crc kubenswrapper[4909]: E1128 16:10:34.295948 4909 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.630244 4909 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 16:10:34 crc kubenswrapper[4909]: I1128 16:10:34.630317 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.019983 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.021499 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.021571 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.021591 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.322765 4909 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.412387 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.413126 4909 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.413206 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.418611 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.827759 4909 apiserver.go:52] "Watching apiserver"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.832143 4909 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.832581 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.833172 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.833235 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:10:35 crc kubenswrapper[4909]: E1128 16:10:35.833244 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:10:35 crc kubenswrapper[4909]: E1128 16:10:35.833383 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.833435 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.833723 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.834912 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.835287 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:10:35 crc kubenswrapper[4909]: E1128 16:10:35.835397 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.836219 4909 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.837722 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.837783 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.837888 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.838256 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.838258 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.839196 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.839454 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.839679 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.841878 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.885532 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.898537 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.912595 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.924211 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.935151 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.949113 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.957564 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:35 crc kubenswrapper[4909]: I1128 16:10:35.970907 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.024161 4909 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.024220 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.034499 4909 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.508132 4909 trace.go:236] Trace[760837503]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:21.515) (total time: 14992ms): Nov 28 16:10:36 crc kubenswrapper[4909]: Trace[760837503]: ---"Objects listed" error: 14992ms (16:10:36.508) Nov 28 16:10:36 crc kubenswrapper[4909]: Trace[760837503]: [14.992650655s] [14.992650655s] END Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.508174 4909 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.511036 4909 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.516687 4909 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.554785 4909 csr.go:261] certificate signing request csr-wcvs8 is approved, waiting to be issued Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.579322 4909 csr.go:257] certificate signing request csr-wcvs8 is issued Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.628263 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.628605 
4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.628785 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:10:37.128763043 +0000 UTC m=+19.525447567 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.628895 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629207 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629451 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629536 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630284 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630355 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630428 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630506 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630568 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630630 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630709 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630783 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630872 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631634 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631755 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631827 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629176 4909 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629417 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629432 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.629719 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.630245 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631164 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631403 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631407 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631597 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631603 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.631908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632030 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632169 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632232 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632266 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632452 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632471 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632520 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632719 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632789 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632848 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632939 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.632967 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633056 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633117 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633174 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633183 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633252 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633277 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633296 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633314 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633333 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633353 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: 
\"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633371 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633378 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633390 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633492 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633497 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633531 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633561 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633587 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633611 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633634 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633677 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633705 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633729 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633753 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633779 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633806 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633831 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633856 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633883 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633912 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633939 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633963 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633991 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634068 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634093 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634127 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634151 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634175 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634200 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634225 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634249 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634276 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634299 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634323 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634347 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: 
\"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634371 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634397 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634426 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634449 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634470 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634493 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634515 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634538 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634560 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634585 4909 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634631 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634669 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634693 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634718 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634760 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634783 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634805 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634828 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:10:36 
crc kubenswrapper[4909]: I1128 16:10:36.634851 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634881 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634906 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634932 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634957 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635012 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635041 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635064 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635086 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635108 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: 
\"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635132 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633534 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633849 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.633951 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634079 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634171 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634260 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634363 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634492 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634559 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634783 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.634897 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635032 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635113 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635302 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635315 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635438 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635517 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635609 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635841 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.635899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.636446 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.636837 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.636851 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.637300 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.637440 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.637715 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638208 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638275 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.637410 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638360 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638504 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638542 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638570 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638601 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638596 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638627 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638633 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638669 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638676 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638694 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638719 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638747 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638773 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638798 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638820 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638843 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638863 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638867 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638912 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638935 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638956 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638974 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.638992 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639009 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639027 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639043 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639059 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639077 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639094 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639112 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639129 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639146 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639162 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639177 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639194 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639210 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639219 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod 
"96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639227 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639303 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639358 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639362 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639390 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639426 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639458 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639473 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639480 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639541 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639569 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639597 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639623 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639649 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639704 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639750 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639775 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639800 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639767 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639796 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639824 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639942 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639985 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.639984 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640018 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640049 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640079 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640092 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640104 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640132 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640161 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640189 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640213 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640237 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640263 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640295 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640328 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640361 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640385 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640412 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640439 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640467 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640493 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640519 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640545 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640578 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640634 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640689 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640717 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640744 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640767 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640788 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640809 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640833 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640856 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640941 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640970 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640998 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641029 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641052 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641076 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641102 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641131 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641157 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641182 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641209 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641234 4909 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641257 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641285 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641311 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641335 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641362 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641387 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641451 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641524 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641550 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641576 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641601 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641631 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641680 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641708 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641746 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641777 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641800 4909 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641834 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641867 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641977 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642181 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642236 4909 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642259 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642277 4909 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642297 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642312 4909 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642324 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642343 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: 
\"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642356 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642370 4909 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642383 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642397 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642811 4909 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642842 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642874 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642888 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642902 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642915 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642931 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642944 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642958 4909 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642971 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642985 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642998 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643011 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643030 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643043 4909 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643056 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643071 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643085 4909 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643098 4909 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643110 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643123 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643137 4909 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643150 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643163 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643176 4909 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643189 4909 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643204 4909 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643218 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643231 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643244 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643257 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643270 4909 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643284 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643298 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: 
I1128 16:10:36.643310 4909 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643323 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643336 4909 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643349 4909 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643363 4909 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643378 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643392 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643406 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643419 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643432 4909 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643446 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643459 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643472 4909 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643485 4909 
reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643498 4909 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.643513 4909 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640088 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640118 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640129 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640303 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640344 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640384 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640414 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640426 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640453 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.650966 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.640893 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641353 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.641732 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.642125 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.643756 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.649692 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.650323 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.650391 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.650723 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.651366 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:37.151336679 +0000 UTC m=+19.548021213 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651425 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651551 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651567 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651832 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651914 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.651956 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.652801 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653070 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653340 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653398 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653442 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653515 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653581 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653633 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.652981 4909 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.653204 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654069 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654181 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654316 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654317 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654391 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654549 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654813 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655046 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655219 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655585 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655752 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655811 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.655976 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656124 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656128 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656391 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656591 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656681 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.654452 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656768 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656833 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656861 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.656906 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.657069 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.657277 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.657308 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.657871 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). 
InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658144 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.658207 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.657043 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658376 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658569 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658586 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658600 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658681 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.658783 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.659171 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:37.159065347 +0000 UTC m=+19.555749891 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.659430 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.661778 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.662587 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.663129 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.663426 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.663590 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.663894 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.665760 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.665756 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.666019 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.666040 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.666298 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.666869 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.666958 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.667938 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.668362 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.670981 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.671989 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.674237 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.675092 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.675970 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.676107 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.676786 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.677608 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.677731 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.678785 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.679090 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.679212 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.679615 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.679830 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.679854 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.679867 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.679930 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:37.179909494 +0000 UTC m=+19.576594018 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.679996 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.680015 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.680024 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.680053 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:37.180045068 +0000 UTC m=+19.576729832 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.680776 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.680935 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.681629 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.681684 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.681794 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.681965 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.682857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.683019 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.683071 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.683473 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.684212 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.684895 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.685053 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.685336 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.685394 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.687198 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.687710 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.687823 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.687872 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.688007 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.688067 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.689052 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.689111 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.691705 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.691891 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.692034 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.692159 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.693721 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.693843 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.693867 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.694169 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.694206 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.694304 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.697934 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.698090 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.714501 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.719401 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.720853 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.731463 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744332 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744400 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744452 4909 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744462 4909 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744472 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744481 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744492 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744501 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744511 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744520 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 
16:10:36.744529 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744537 4909 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744546 4909 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744555 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744565 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744574 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744582 4909 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744592 4909 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744601 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744610 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744618 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744542 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744649 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod 
\"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744627 4909 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744756 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744775 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744790 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744856 4909 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744871 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744911 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744926 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744938 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744951 4909 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744965 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744978 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.744990 4909 
reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745004 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745017 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745029 4909 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745042 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745054 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745065 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745077 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745089 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745101 4909 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745153 4909 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745166 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745179 4909 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745192 
4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745206 4909 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745218 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745230 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745244 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745257 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745270 4909 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745285 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745297 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745312 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745329 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745347 4909 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745366 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745378 4909 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745391 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745404 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745416 4909 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745429 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745441 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745454 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745466 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745479 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745494 4909 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745508 4909 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745520 4909 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745535 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745547 4909 reconciler_common.go:293] "Volume detached for volume \"service-ca\" 
(UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745561 4909 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745573 4909 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745586 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745599 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745614 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745627 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745639 4909 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745652 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745684 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745696 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745709 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745721 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745734 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745747 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745762 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745775 4909 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745788 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745801 4909 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745815 4909 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745827 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745840 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745852 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745867 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745880 4909 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745892 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745904 4909 
reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745916 4909 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745928 4909 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745940 4909 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745955 4909 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745968 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745979 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.745991 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746002 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746015 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746027 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746040 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746052 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746065 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746077 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746089 4909 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746105 4909 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746117 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746128 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746142 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746155 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746168 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746181 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746193 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746205 4909 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746217 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746230 4909 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746242 4909 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746254 4909 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746266 4909 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746278 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746290 4909 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746302 4909 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746317 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.746334 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.756595 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:10:36 crc kubenswrapper[4909]: W1128 16:10:36.767933 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-6f7ae925231f0586bc742931357ab21380a3c69cebb50f020767ed3ebd0579ae WatchSource:0}: Error finding container 6f7ae925231f0586bc742931357ab21380a3c69cebb50f020767ed3ebd0579ae: Status 404 returned error can't find the container with id 6f7ae925231f0586bc742931357ab21380a3c69cebb50f020767ed3ebd0579ae Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.769090 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.772364 4909 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 16:10:36 crc kubenswrapper[4909]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Nov 28 16:10:36 crc kubenswrapper[4909]: set -o allexport Nov 28 16:10:36 crc kubenswrapper[4909]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Nov 28 16:10:36 crc kubenswrapper[4909]: source /etc/kubernetes/apiserver-url.env Nov 28 16:10:36 crc kubenswrapper[4909]: else Nov 28 16:10:36 crc kubenswrapper[4909]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Nov 28 16:10:36 crc kubenswrapper[4909]: exit 1 Nov 28 16:10:36 crc kubenswrapper[4909]: fi Nov 28 16:10:36 crc kubenswrapper[4909]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Nov 28 16:10:36 crc kubenswrapper[4909]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_I
MAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 16:10:36 crc kubenswrapper[4909]: > logger="UnhandledError" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.773578 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.776445 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-bwbm6"] Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.776794 4909 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.777423 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.778951 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.778993 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.779120 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.791809 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.796090 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.797302 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" 
pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.802168 4909 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 16:10:36 crc kubenswrapper[4909]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 28 16:10:36 crc kubenswrapper[4909]: if [[ -f "/env/_master" ]]; then Nov 28 16:10:36 crc kubenswrapper[4909]: set -o allexport Nov 28 16:10:36 crc kubenswrapper[4909]: source "/env/_master" Nov 28 16:10:36 crc kubenswrapper[4909]: set +o allexport Nov 28 16:10:36 crc kubenswrapper[4909]: fi Nov 28 16:10:36 crc kubenswrapper[4909]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. Nov 28 16:10:36 crc kubenswrapper[4909]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Nov 28 16:10:36 crc kubenswrapper[4909]: ho_enable="--enable-hybrid-overlay" Nov 28 16:10:36 crc kubenswrapper[4909]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Nov 28 16:10:36 crc kubenswrapper[4909]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Nov 28 16:10:36 crc kubenswrapper[4909]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Nov 28 16:10:36 crc kubenswrapper[4909]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 28 16:10:36 crc kubenswrapper[4909]: --webhook-cert-dir="/etc/webhook-cert" \ Nov 28 16:10:36 crc kubenswrapper[4909]: --webhook-host=127.0.0.1 \ Nov 28 16:10:36 crc kubenswrapper[4909]: --webhook-port=9743 \ Nov 28 16:10:36 crc kubenswrapper[4909]: ${ho_enable} \ Nov 28 16:10:36 crc kubenswrapper[4909]: --enable-interconnect \ Nov 28 16:10:36 crc kubenswrapper[4909]: --disable-approver \ Nov 28 16:10:36 crc kubenswrapper[4909]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Nov 28 16:10:36 crc kubenswrapper[4909]: --wait-for-kubernetes-api=200s \ Nov 28 16:10:36 crc kubenswrapper[4909]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Nov 28 16:10:36 crc kubenswrapper[4909]: --loglevel="${LOGLEVEL}" Nov 28 16:10:36 crc kubenswrapper[4909]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 16:10:36 crc kubenswrapper[4909]: > logger="UnhandledError" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.804724 4909 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 16:10:36 crc kubenswrapper[4909]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 28 16:10:36 crc kubenswrapper[4909]: if [[ -f "/env/_master" ]]; then Nov 28 16:10:36 crc kubenswrapper[4909]: set -o allexport Nov 28 16:10:36 crc kubenswrapper[4909]: source "/env/_master" Nov 28 16:10:36 crc kubenswrapper[4909]: set +o allexport Nov 28 16:10:36 crc kubenswrapper[4909]: fi Nov 28 16:10:36 crc kubenswrapper[4909]: Nov 28 16:10:36 crc kubenswrapper[4909]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Nov 28 16:10:36 crc kubenswrapper[4909]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 28 16:10:36 crc kubenswrapper[4909]: --disable-webhook \ Nov 28 16:10:36 crc kubenswrapper[4909]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Nov 28 16:10:36 crc kubenswrapper[4909]: --loglevel="${LOGLEVEL}" Nov 28 16:10:36 crc kubenswrapper[4909]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 16:10:36 crc kubenswrapper[4909]: > logger="UnhandledError" Nov 28 16:10:36 crc kubenswrapper[4909]: E1128 16:10:36.810026 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.821060 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.832327 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.840923 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.842592 4909 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.847187 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmxp7\" (UniqueName: \"kubernetes.io/projected/4a458a03-0fea-47c0-9748-510145f40b30-kube-api-access-lmxp7\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.847259 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4a458a03-0fea-47c0-9748-510145f40b30-hosts-file\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.856706 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.868190 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.881313 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.889439 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:36 crc 
kubenswrapper[4909]: I1128 16:10:36.948552 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmxp7\" (UniqueName: \"kubernetes.io/projected/4a458a03-0fea-47c0-9748-510145f40b30-kube-api-access-lmxp7\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.948626 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4a458a03-0fea-47c0-9748-510145f40b30-hosts-file\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.948772 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4a458a03-0fea-47c0-9748-510145f40b30-hosts-file\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.961047 4909 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 16:10:36 crc kubenswrapper[4909]: I1128 16:10:36.969510 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmxp7\" (UniqueName: \"kubernetes.io/projected/4a458a03-0fea-47c0-9748-510145f40b30-kube-api-access-lmxp7\") pod \"node-resolver-bwbm6\" (UID: \"4a458a03-0fea-47c0-9748-510145f40b30\") " pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.025361 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e462121655598cbc2b4a8cc914767d5ebf5820b762780f3f0c94868cecde7a9c"} Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.027330 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"b036a5b6a07bbc7c516c12dc38ca38479f67f8c960094abd8899de7eb7a23e85"} Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.030145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"6f7ae925231f0586bc742931357ab21380a3c69cebb50f020767ed3ebd0579ae"} Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.039632 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.053847 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.070842 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.088992 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.094755 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-bwbm6" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.138354 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.150888 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.151027 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:10:38.151007933 +0000 UTC m=+20.547692457 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.153439 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.186082 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.200988 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.210645 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc 
kubenswrapper[4909]: I1128 16:10:37.220801 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.232964 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.243179 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.251843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.251888 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.251911 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.251927 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252047 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252093 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:38.252080502 +0000 UTC m=+20.648765026 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252157 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252170 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252180 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252206 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:38.252200035 +0000 UTC m=+20.648884559 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252260 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252269 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252275 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252293 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:38.252287438 +0000 UTC m=+20.648971962 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252323 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.252342 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:38.252336929 +0000 UTC m=+20.649021453 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.254891 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e
6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15
dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.265000 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.273073 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc 
kubenswrapper[4909]: I1128 16:10:37.282523 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.293485 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.575500 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.580787 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-28 16:05:36 +0000 UTC, rotation deadline is 2026-10-10 07:55:01.171998395 +0000 UTC Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.580878 4909 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7575h44m23.591126446s for next certificate rotation Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.584815 4909 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.603031 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.603800 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-q8nfv"] Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.604174 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.609342 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.609433 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.609816 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.610770 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.621242 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.637605 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.651831 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.655705 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prc9l\" (UniqueName: \"kubernetes.io/projected/6b77cc4b-dc69-4ece-8e10-64eebc98a578-kube-api-access-prc9l\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.655744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b77cc4b-dc69-4ece-8e10-64eebc98a578-host\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.655761 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6b77cc4b-dc69-4ece-8e10-64eebc98a578-serviceca\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.661209 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.679126 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814
a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"C
ompleted\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.717500 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.738587 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.748066 4909 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748325 4909 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.CSIDriver ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748495 4909 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Service ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748570 4909 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: very short watch: object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748600 4909 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748607 4909 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.748601 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-58b4c7f79c-55gtf/status\": read tcp 38.102.83.53:59536->38.102.83.53:6443: use of closed network connection" Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748682 4909 reflector.go:484] object-"openshift-dns"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.748442 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": read tcp 38.102.83.53:59536->38.102.83.53:6443: use of closed network connection" event="&Event{ObjectMeta:{kube-apiserver-crc.187c378a3842d3b7 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Created,Message:Created container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:10:20.811310007 +0000 UTC m=+3.207994531,LastTimestamp:2025-11-28 16:10:20.811310007 +0000 UTC m=+3.207994531,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748678 4909 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748516 4909 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc kubenswrapper[4909]: W1128 16:10:37.748548 4909 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Nov 28 16:10:37 crc 
kubenswrapper[4909]: I1128 16:10:37.756297 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6b77cc4b-dc69-4ece-8e10-64eebc98a578-serviceca\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.757354 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6b77cc4b-dc69-4ece-8e10-64eebc98a578-serviceca\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.757482 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prc9l\" (UniqueName: \"kubernetes.io/projected/6b77cc4b-dc69-4ece-8e10-64eebc98a578-kube-api-access-prc9l\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.757565 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b77cc4b-dc69-4ece-8e10-64eebc98a578-host\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.757664 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b77cc4b-dc69-4ece-8e10-64eebc98a578-host\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.774803 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.782393 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prc9l\" (UniqueName: \"kubernetes.io/projected/6b77cc4b-dc69-4ece-8e10-64eebc98a578-kube-api-access-prc9l\") pod \"node-ca-q8nfv\" (UID: \"6b77cc4b-dc69-4ece-8e10-64eebc98a578\") " pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.790544 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.804812 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.822568 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.837153 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.849710 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.863365 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.878285 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.890519 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.900530 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.900562 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.900543 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.900688 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.900747 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:37 crc kubenswrapper[4909]: E1128 16:10:37.900935 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.905019 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.906014 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.906923 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.908496 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.909175 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.910089 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.910809 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.911561 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.912763 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.913421 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.914503 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.915395 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.916552 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.916548 4909 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.917453 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.917619 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-q8nfv" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.921039 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.921727 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.922509 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.922994 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.923777 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.924445 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.925107 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.926403 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.927112 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.927893 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.928378 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.929179 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.930105 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.931958 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.932569 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.933691 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.934430 4909 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.934630 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.935037 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.936858 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.937462 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.938067 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.939619 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.940856 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.941488 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.942744 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.943605 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.944786 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.945032 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.945623 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.946636 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.947706 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.948595 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.950101 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.952073 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.953512 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.961299 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.961891 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.962554 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.964257 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.965014 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.966194 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.966682 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.972988 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.984785 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:37 crc kubenswrapper[4909]: I1128 16:10:37.998988 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814
a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"C
ompleted\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.015329 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/stati
c-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.030204 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.033989 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.034041 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.040476 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.042911 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-q8nfv" event={"ID":"6b77cc4b-dc69-4ece-8e10-64eebc98a578","Type":"ContainerStarted","Data":"03081fc2fa31372ef6689458024940da94f588961428a75ce902e137eb8b4a46"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.043236 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.045736 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-bwbm6" event={"ID":"4a458a03-0fea-47c0-9748-510145f40b30","Type":"ContainerStarted","Data":"dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.045782 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-bwbm6" event={"ID":"4a458a03-0fea-47c0-9748-510145f40b30","Type":"ContainerStarted","Data":"d8906e1d3229f31d9504626de3e2505221c43be2d8292d31311d26dc4aecacbf"} Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.057259 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.071711 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.081957 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.099931 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.127019 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.144420 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.160313 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.160493 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:10:40.160471866 +0000 UTC m=+22.557156390 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.170157 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc4782
74c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc 
kubenswrapper[4909]: I1128 16:10:38.182147 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\
\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.192749 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.202040 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.216950 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.232999 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.246703 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.261163 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.261201 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.261219 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.261237 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261329 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261373 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:40.261359369 +0000 UTC m=+22.658043893 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261426 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261438 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261447 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261467 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:40.261461412 +0000 UTC m=+22.658145936 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261503 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261511 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261517 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261534 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:40.261528914 +0000 UTC m=+22.658213438 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261557 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: E1128 16:10:38.261575 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:40.261570105 +0000 UTC m=+22.658254629 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.268814 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.304932 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.329831 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.344618 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.371232 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.393745 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.412455 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.481009 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-d5nd7"] Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.481459 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.483302 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gdz9b"] Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.483887 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qxw94"] Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.484567 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-wx2jj"] Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.484817 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.485162 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.485450 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.487587 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488091 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488201 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488364 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488502 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488774 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.488900 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.489075 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.489269 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.489430 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.489592 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.489765 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.490361 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.490370 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.490423 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.490603 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.490681 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 
16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.491078 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.497024 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.506840 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.524931 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.538970 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.555441 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563593 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563638 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563677 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563701 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wwjw\" (UniqueName: \"kubernetes.io/projected/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-kube-api-access-8wwjw\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563726 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6lxd\" (UniqueName: \"kubernetes.io/projected/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-kube-api-access-x6lxd\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563746 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-kubelet\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563765 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563786 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-etc-kubernetes\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563805 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-netns\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563825 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-daemon-config\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563845 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbrc9\" (UniqueName: \"kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563867 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xrns\" (UniqueName: \"kubernetes.io/projected/5f0ac931-d37b-4342-8c12-c2779b455cc5-kube-api-access-5xrns\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563891 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cni-binary-copy\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563910 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-socket-dir-parent\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: 
I1128 16:10:38.563929 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-multus-certs\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563949 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.563973 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564005 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5f0ac931-d37b-4342-8c12-c2779b455cc5-mcd-auth-proxy-config\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564047 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-system-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564067 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-os-release\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564091 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564111 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes\") 
pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564133 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-multus\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564153 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564172 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5f0ac931-d37b-4342-8c12-c2779b455cc5-rootfs\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564191 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564212 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-k8s-cni-cncf-io\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564232 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564253 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564272 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564291 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/5f0ac931-d37b-4342-8c12-c2779b455cc5-proxy-tls\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564322 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-system-cni-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564341 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-conf-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564362 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564381 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cnibin\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564403 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564424 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-bin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564443 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564462 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 
16:10:38.564483 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564504 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564534 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cnibin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564560 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564580 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-hostroot\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564602 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564626 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.564644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-os-release\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.565129 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.577013 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.608167 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.650313 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666028 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cnibin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666259 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666352 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666360 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666208 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cnibin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666450 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666481 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666577 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666614 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666691 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666805 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.666937 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667136 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-os-release\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667155 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667173 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-hostroot\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " 
pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667202 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667236 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667256 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667279 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667303 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6lxd\" (UniqueName: \"kubernetes.io/projected/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-kube-api-access-x6lxd\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667330 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wwjw\" (UniqueName: \"kubernetes.io/projected/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-kube-api-access-8wwjw\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667357 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667357 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-kubelet\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " 
pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667390 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-kubelet\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667395 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667416 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667410 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-os-release\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667448 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-etc-kubernetes\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667473 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cni-binary-copy\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667494 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-socket-dir-parent\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667506 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667333 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: 
\"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-hostroot\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667518 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-netns\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667589 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-daemon-config\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667592 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-socket-dir-parent\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667616 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbrc9\" (UniqueName: \"kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667791 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xrns\" (UniqueName: \"kubernetes.io/projected/5f0ac931-d37b-4342-8c12-c2779b455cc5-kube-api-access-5xrns\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667823 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-multus-certs\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.667946 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-netns\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668227 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-multus-certs\") pod \"multus-wx2jj\" (UID: 
\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668249 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-etc-kubernetes\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668308 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668737 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-system-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668797 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5f0ac931-d37b-4342-8c12-c2779b455cc5-mcd-auth-proxy-config\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668842 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-os-release\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668876 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668914 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668946 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.668979 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5f0ac931-d37b-4342-8c12-c2779b455cc5-rootfs\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669028 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-multus\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669059 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669090 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669119 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-k8s-cni-cncf-io\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669175 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-system-cni-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669196 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc 
kubenswrapper[4909]: I1128 16:10:38.669218 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5f0ac931-d37b-4342-8c12-c2779b455cc5-proxy-tls\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669257 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669283 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-bin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669304 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-conf-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669350 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cnibin\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669426 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cnibin\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669499 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669560 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-system-cni-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669593 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669577 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-system-cni-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669640 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-bin\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669709 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-run-k8s-cni-cncf-io\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669748 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669748 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-host-var-lib-cni-multus\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669771 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-os-release\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669779 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669751 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669715 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5f0ac931-d37b-4342-8c12-c2779b455cc5-rootfs\") pod 
\"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669718 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.669691 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-conf-dir\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.687182 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/s
tatic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18
Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726176 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726248 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726288 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-cni-binary-copy\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726249 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726379 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-multus-daemon-config\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.726896 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.728371 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.728935 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5f0ac931-d37b-4342-8c12-c2779b455cc5-mcd-auth-proxy-config\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: 
I1128 16:10:38.733528 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6lxd\" (UniqueName: \"kubernetes.io/projected/6e3805b2-8ad3-4fa6-b88f-e0ae42294202-kube-api-access-x6lxd\") pod \"multus-wx2jj\" (UID: \"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\") " pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.738224 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.746702 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5f0ac931-d37b-4342-8c12-c2779b455cc5-proxy-tls\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.755100 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wwjw\" (UniqueName: \"kubernetes.io/projected/d9d93f2d-2a90-4d2d-b8e6-e48973be876f-kube-api-access-8wwjw\") pod \"multus-additional-cni-plugins-gdz9b\" (UID: \"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\") " pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.758370 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbrc9\" (UniqueName: \"kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9\") pod \"ovnkube-node-qxw94\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.772810 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xrns\" (UniqueName: \"kubernetes.io/projected/5f0ac931-d37b-4342-8c12-c2779b455cc5-kube-api-access-5xrns\") pod \"machine-config-daemon-d5nd7\" (UID: \"5f0ac931-d37b-4342-8c12-c2779b455cc5\") " pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.799732 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.802245 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.811578 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-wx2jj" Nov 28 16:10:38 crc kubenswrapper[4909]: W1128 16:10:38.814168 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f0ac931_d37b_4342_8c12_c2779b455cc5.slice/crio-738e681d598b477dd2ecdf03c23e4f30df4d8bbc98ef9a78cc478312345ca4ed WatchSource:0}: Error finding container 738e681d598b477dd2ecdf03c23e4f30df4d8bbc98ef9a78cc478312345ca4ed: Status 404 returned error can't find the container with id 738e681d598b477dd2ecdf03c23e4f30df4d8bbc98ef9a78cc478312345ca4ed Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.818043 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.819740 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.824214 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:38 crc kubenswrapper[4909]: W1128 16:10:38.845524 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e3805b2_8ad3_4fa6_b88f_e0ae42294202.slice/crio-e28172655fbcb6f060700d38e2de0da2f80d0c4e23cab1ebc7f28c8efe23e506 WatchSource:0}: Error finding container e28172655fbcb6f060700d38e2de0da2f80d0c4e23cab1ebc7f28c8efe23e506: Status 404 returned error can't find the container with id e28172655fbcb6f060700d38e2de0da2f80d0c4e23cab1ebc7f28c8efe23e506 Nov 28 16:10:38 crc kubenswrapper[4909]: W1128 16:10:38.849001 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9d93f2d_2a90_4d2d_b8e6_e48973be876f.slice/crio-33cbd3eeb330037b583ac95d4b22a7a5c03ff77bb05bd5fb5d5d2aa09988a7a5 WatchSource:0}: Error finding container 33cbd3eeb330037b583ac95d4b22a7a5c03ff77bb05bd5fb5d5d2aa09988a7a5: Status 404 returned error can't find the container with id 33cbd3eeb330037b583ac95d4b22a7a5c03ff77bb05bd5fb5d5d2aa09988a7a5 Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.849608 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.859438 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.879452 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.932778 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:38 crc kubenswrapper[4909]: I1128 16:10:38.974128 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\
\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.006337 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.049214 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.051331 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerStarted","Data":"e28172655fbcb6f060700d38e2de0da2f80d0c4e23cab1ebc7f28c8efe23e506"} Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.054675 4909 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"483b860a481510c4981596c62db5b70137ac2db168b668b14bc7b85e6ded95e3"} Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.056408 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-q8nfv" event={"ID":"6b77cc4b-dc69-4ece-8e10-64eebc98a578","Type":"ContainerStarted","Data":"71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64"} Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.058484 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"738e681d598b477dd2ecdf03c23e4f30df4d8bbc98ef9a78cc478312345ca4ed"} Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.058742 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.060521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerStarted","Data":"33cbd3eeb330037b583ac95d4b22a7a5c03ff77bb05bd5fb5d5d2aa09988a7a5"} Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.099472 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.129603 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.167322 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.181327 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.235269 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.265799 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.306678 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.348558 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.404851 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: 
I1128 16:10:39.429136 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.472433 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni 
whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"n
ame\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.507403 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.551355 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.599121 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.628761 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.677739 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":
\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.711741 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.753161 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.791498 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.838795 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.874420 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.901219 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:39 crc kubenswrapper[4909]: E1128 16:10:39.901349 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.901236 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:39 crc kubenswrapper[4909]: E1128 16:10:39.901408 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.901219 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:39 crc kubenswrapper[4909]: E1128 16:10:39.901452 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.907221 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.949192 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:39 crc kubenswrapper[4909]: I1128 16:10:39.995794 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.028124 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.065887 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.067831 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b" exitCode=0 Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.067954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.069715 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerStarted","Data":"0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.075505 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.076704 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" exitCode=0 Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.076808 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.078830 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.078872 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.107501 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.153535 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPa
th\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"
192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.186539 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.187242 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:10:44.187214991 +0000 UTC m=+26.583899535 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.190575 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.225621 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.267005 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.288069 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.288137 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.288163 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.288181 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288233 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288289 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288310 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:44.288288939 +0000 UTC m=+26.684973463 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288342 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:44.28832493 +0000 UTC m=+26.685009454 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288396 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288406 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288417 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288438 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:44.288432114 +0000 UTC m=+26.685116638 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288478 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288486 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288493 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.288511 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:44.288505036 +0000 UTC m=+26.685189560 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.308731 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.345578 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.391784 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.429139 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919
d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.467878 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTi
me\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.515729 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.546504 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.589506 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.630250 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.668214 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.697059 4909 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.699468 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.699528 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.699548 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.699676 4909 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.706224 4909 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.706609 4909 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.708611 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.708694 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.708712 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.708736 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.708758 4909 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.724767 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.733528 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.733579 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.733597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.733622 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.733640 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.760020 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.764691 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.764733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.764743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.764758 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.764769 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.785527 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.789705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.789773 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.789785 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.789804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.789816 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.810339 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.822047 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.822089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.822100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.822116 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.822126 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.833755 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:40 crc kubenswrapper[4909]: E1128 16:10:40.833862 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.835110 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.835129 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.835137 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.835149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.835158 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.937541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.937591 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.937603 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.937622 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:40 crc kubenswrapper[4909]: I1128 16:10:40.937636 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:40Z","lastTransitionTime":"2025-11-28T16:10:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.040949 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.041006 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.041022 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.041045 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.041062 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086617 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086691 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086705 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086718 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086728 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.086740 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.089651 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979" exitCode=0 Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.089699 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.113635 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.129515 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.143527 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.143581 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.143593 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.143617 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.143629 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.163087 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.178743 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.196950 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.211897 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.226420 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.245438 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.252200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.252249 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.252257 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.252272 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.252301 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.255177 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"p
odIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.266604 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.283016 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.295201 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224
dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\
"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.308740 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.320462 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.354003 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.354035 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.354043 4909 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.354056 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.354065 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.456856 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.456908 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.456918 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.456937 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.456948 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.559801 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.560184 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.560198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.560217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.560235 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.633905 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.637239 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.649870 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.659815 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.662469 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.662496 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.662506 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.662524 4909 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.662535 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.672058 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\
",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.687353 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-
syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.702175 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.713163 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.725076 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.738566 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.750950 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.766154 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.766196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.766204 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.766219 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.766228 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.771624 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.784777 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.799162 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.811766 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.828277 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.869238 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.869294 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.869307 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.869332 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.869347 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.872304 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.900727 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.900775 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.900793 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:41 crc kubenswrapper[4909]: E1128 16:10:41.900945 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:41 crc kubenswrapper[4909]: E1128 16:10:41.901047 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:41 crc kubenswrapper[4909]: E1128 16:10:41.901143 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.912726 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.952670 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID
\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.971149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.971210 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.971227 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.971251 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.971268 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:41Z","lastTransitionTime":"2025-11-28T16:10:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:41 crc kubenswrapper[4909]: I1128 16:10:41.986807 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.026772 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.067542 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.073250 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.073295 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.073306 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.073326 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.073338 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.095573 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124" exitCode=0 Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.095624 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.109977 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5x
rns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.151707 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.175551 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.175586 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.175597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.175613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.175624 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.190111 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.229722 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.265293 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.279408 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.279731 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.279750 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.279767 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.279777 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.311246 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.352208 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.382169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.382273 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.382295 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.382357 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.382376 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.388184 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"p
odIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.429639 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-clu
ster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.471420 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.484969 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.485035 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.485059 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.485088 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.485112 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.514497 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.552916 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.594733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.594771 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.594780 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.594796 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.594806 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.597587 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,
\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.632934 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemo
n\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.674849 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.697008 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.697069 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.697087 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.697112 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.697130 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.715602 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 
16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.748954 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.798444 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.799947 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.800010 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.800031 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.800055 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.800072 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.831808 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.879200 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28
T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.903592 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.903644 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.903692 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.903714 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.903732 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:42Z","lastTransitionTime":"2025-11-28T16:10:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.911975 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:42 crc kubenswrapper[4909]: I1128 16:10:42.953945 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.008277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.008336 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.008354 4909 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.008378 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.008394 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.105399 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.110409 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93" exitCode=0 Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.110460 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.110898 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.111100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.111285 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.110635 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.111432 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.128949 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.161069 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.186068 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.209084 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.214823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.214860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.214871 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.214888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.214899 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.229180 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.247699 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.266483 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.278858 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.310200 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.317207 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.317370 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.317393 4909 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.317413 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.317426 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.350193 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.391391 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.419793 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.419832 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.419843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.419860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.419874 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.430304 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.470526 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.509893 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.522243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 
16:10:43.522437 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.522518 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.522598 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.522696 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.625954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.625998 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.626007 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.626024 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.626033 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.728919 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.728969 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.728981 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.728999 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.729013 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.832457 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.832514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.832528 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.832548 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.832560 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.900768 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:43 crc kubenswrapper[4909]: E1128 16:10:43.900965 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.901541 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.901606 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:43 crc kubenswrapper[4909]: E1128 16:10:43.901646 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:43 crc kubenswrapper[4909]: E1128 16:10:43.901829 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.936309 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.936372 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.936392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.936417 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:43 crc kubenswrapper[4909]: I1128 16:10:43.936434 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:43Z","lastTransitionTime":"2025-11-28T16:10:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.039705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.039773 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.039791 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.039848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.039868 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.120354 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2" exitCode=0 Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.120469 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.142850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.142918 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.142942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.142974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.142997 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.144977 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.158395 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.199404 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.218562 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.234036 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.234313 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.23427276 +0000 UTC m=+34.630957294 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.247242 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.249232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.249268 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.249282 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.249304 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.249318 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.268641 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.285864 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.299943 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.311999 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.328727 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.335071 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.335144 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.335201 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.335274 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335287 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335322 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335335 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335396 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.33537695 +0000 UTC m=+34.732061474 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335416 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335494 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.335469532 +0000 UTC m=+34.732154096 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335613 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335639 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335693 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335742 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.33572739 +0000 UTC m=+34.732411954 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335786 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: E1128 16:10:44.335817 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.335809392 +0000 UTC m=+34.732493916 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.345984 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.353912 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.353953 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.353963 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.353981 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.353996 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.358669 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.375973 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.396242 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:44Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.456643 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.456759 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.456783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.456816 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.456840 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.559829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.559893 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.559913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.559965 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.559985 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.662196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.662237 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.662251 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.662268 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.662278 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.765735 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.765786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.765799 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.765823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.765838 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.869709 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.869770 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.869783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.869806 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.869818 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.972255 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.972311 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.972328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.972353 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:44 crc kubenswrapper[4909]: I1128 16:10:44.972375 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:44Z","lastTransitionTime":"2025-11-28T16:10:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.075766 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.075812 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.075823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.075840 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.075849 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.134878 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9d93f2d-2a90-4d2d-b8e6-e48973be876f" containerID="2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346" exitCode=0 Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.134947 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerDied","Data":"2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.157933 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc27
6e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.176125 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.181121 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.181173 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.181191 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.181211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.181223 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.197205 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.221552 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.237299 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.254626 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.268706 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288042 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288444 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288496 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288509 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288526 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.288537 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.302920 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.317397 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.328937 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.344529 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.356888 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.368620 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:45Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.390529 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.390568 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.390578 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.390594 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.390607 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.493192 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.493236 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.493247 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.493266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.493277 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.596586 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.596639 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.596682 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.596705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.596734 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.699743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.699782 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.699791 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.699807 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.699818 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.803798 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.804246 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.804263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.804290 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.804308 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.901167 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.901190 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.901257 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:45 crc kubenswrapper[4909]: E1128 16:10:45.901865 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:45 crc kubenswrapper[4909]: E1128 16:10:45.901738 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:45 crc kubenswrapper[4909]: E1128 16:10:45.902189 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.906946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.906967 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.906977 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.906988 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:45 crc kubenswrapper[4909]: I1128 16:10:45.906997 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:45Z","lastTransitionTime":"2025-11-28T16:10:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.010219 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.010256 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.010267 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.010284 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.010296 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.113428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.113488 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.113500 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.113519 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.113531 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.144310 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.144970 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.145036 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.156523 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" event={"ID":"d9d93f2d-2a90-4d2d-b8e6-e48973be876f","Type":"ContainerStarted","Data":"9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.174988 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac49
09a2b90d543cbbab0c279589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.190645 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.192365 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.199530 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.216168 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.216393 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.216475 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.216576 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.216678 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.220825 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.239535 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.254874 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.267075 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.289416 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.307526 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.318013 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.319206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.319269 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.319296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.319383 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.319472 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.330872 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.344409 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.359418 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.375208 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.387848 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.405526 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift
-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.417120 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.422324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.422392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.422411 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.422436 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.422454 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.431707 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.443805 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.462063 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.474625 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.498485 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.517771 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.525795 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.525864 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.525883 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.525912 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.525929 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.534472 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.553130 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.567802 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.593215 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.610349 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.629177 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.629248 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.629265 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.629293 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.629311 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.633202 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acc
ess-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:46Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.732530 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.732927 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.733206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.733404 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.733625 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.837229 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.837293 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.837311 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.837333 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.837348 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.940807 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.941166 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.941341 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.941487 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:46 crc kubenswrapper[4909]: I1128 16:10:46.941632 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:46Z","lastTransitionTime":"2025-11-28T16:10:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.045325 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.045400 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.045418 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.045444 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.045463 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.148244 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.148305 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.148328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.148357 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.148379 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.160152 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.251139 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.251402 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.251501 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.251602 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.251742 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.353942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.354005 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.354023 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.354049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.354068 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.456064 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.456320 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.456406 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.456482 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.456548 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.508919 4909 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.559329 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.559566 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.559674 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.559789 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.559838 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.662804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.662857 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.662876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.662899 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.662915 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.708996 4909 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.766256 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.766330 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.766348 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.766373 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.766393 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.869422 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.869467 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.869480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.869497 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.869509 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.901551 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:47 crc kubenswrapper[4909]: E1128 16:10:47.901784 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.901889 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.901932 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:47 crc kubenswrapper[4909]: E1128 16:10:47.902020 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:47 crc kubenswrapper[4909]: E1128 16:10:47.902118 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.924309 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.942329 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.960785 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:10:47Z is after 2025-08-24T17:21:41Z"
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.972393 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.972434 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.972441 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.972455 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.972466 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:47Z","lastTransitionTime":"2025-11-28T16:10:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:10:47 crc kubenswrapper[4909]: I1128 16:10:47.977862 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.003745 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.023037 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.039954 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.059194 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\
\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"
kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\
\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 
16:10:48.074824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.074877 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.074895 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.074920 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.074939 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.077807 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.091590 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.105034 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.125321 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.143219 4909 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.162352 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.165432 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/0.log" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.167615 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589" exitCode=1 Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.167744 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.168798 4909 scope.go:117] "RemoveContainer" containerID="d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589" Nov 28 16:10:48 crc 
kubenswrapper[4909]: I1128 16:10:48.177361 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.177442 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.177456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.177479 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.177492 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.188928 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\"
:\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.202519 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.252543 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.269306 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.279510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.279754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.279829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.279909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.279972 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.283468 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.295328 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.314329 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.333161 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.348019 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.361806 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.377073 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.383075 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.383144 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.383156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.383193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.383207 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.392705 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.407357 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.425915 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:47Z\\\",\\\"message\\\":\\\" event handler 8\\\\nI1128 16:10:47.891297 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:10:47.891357 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 16:10:47.891406 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:10:47.891421 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 16:10:47.891448 6215 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:10:47.891484 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 16:10:47.891493 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:10:47.891562 6215 factory.go:656] Stopping watch factory\\\\nI1128 16:10:47.891480 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:10:47.891625 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 16:10:47.891622 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 16:10:47.892026 6215 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 16:10:47.892145 6215 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 16:10:47.892229 6215 ovnkube.go:599] Stopped ovnkube\\\\nI1128 
16:10:47.892282 6215 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:10:47.892399 6215 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3
fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.450151 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.486492 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.486545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.486563 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.486585 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.486601 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.588126 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.588157 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.588165 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.588178 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.588188 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.691004 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.691062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.691080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.691103 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.691120 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.793917 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.793977 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.793987 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.794001 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.794011 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.896825 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.896864 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.896873 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.896887 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:48 crc kubenswrapper[4909]: I1128 16:10:48.896896 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:48Z","lastTransitionTime":"2025-11-28T16:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:48.999619 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.000015 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.000026 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.000043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.000055 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.103020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.103071 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.103085 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.103102 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.103116 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.180717 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/0.log" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.184973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.185540 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206257 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206322 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206340 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206368 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206391 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.206633 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.224446 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.247354 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.268803 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.288189 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.309960 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.310024 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.310044 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.310072 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.310091 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.310754 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.331988 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.353231 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.372849 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.407130 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\
\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"
kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:47Z\\\",\\\"message\\\":\\\" event handler 8\\\\nI1128 16:10:47.891297 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:10:47.891357 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 16:10:47.891406 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:10:47.891421 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 16:10:47.891448 6215 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:10:47.891484 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 16:10:47.891493 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:10:47.891562 6215 factory.go:656] Stopping watch factory\\\\nI1128 16:10:47.891480 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:10:47.891625 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 16:10:47.891622 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 16:10:47.892026 6215 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 16:10:47.892145 6215 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 16:10:47.892229 6215 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:10:47.892282 6215 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 
16:10:47.892399 6215 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initC
ontainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.413021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.413113 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.413134 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.413158 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.413177 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.426545 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.450956 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.470777 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.491457 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.516642 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.516758 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.516776 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.516806 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.516825 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.620197 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.620275 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.620296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.620324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.620342 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.723327 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.723396 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.723416 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.723442 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.723461 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.826188 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.826256 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.826275 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.826302 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.826319 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.901364 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.901516 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:49 crc kubenswrapper[4909]: E1128 16:10:49.901591 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.901686 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:49 crc kubenswrapper[4909]: E1128 16:10:49.901861 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:49 crc kubenswrapper[4909]: E1128 16:10:49.902096 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.929125 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.929194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.929213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.929240 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:49 crc kubenswrapper[4909]: I1128 16:10:49.929261 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:49Z","lastTransitionTime":"2025-11-28T16:10:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.032939 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.033004 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.033022 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.033048 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.033067 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.136474 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.136545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.136562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.136585 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.136604 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.192929 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/1.log" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.194435 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/0.log" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.199003 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1" exitCode=1 Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.199056 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.199124 4909 scope.go:117] "RemoveContainer" containerID="d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.200053 4909 scope.go:117] "RemoveContainer" containerID="a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1" Nov 28 16:10:50 crc kubenswrapper[4909]: E1128 16:10:50.200329 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.219246 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240108 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z"
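The patch bodies in these status_manager entries are JSON with one level of backslash escaping added by the logger. A small Go sketch (hypothetical helper; abbreviated input) that re-indents such a fragment once the escaping is stripped, which makes the status diffs readable:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        // Abbreviated fragment of a patch payload from the entries above,
        // with the log's backslash escaping already removed.
        patch := `{"metadata":{"uid":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"},"status":{"conditions":[{"lastTransitionTime":"2025-11-28T16:10:40Z","status":"True","type":"Ready"}]}}`
        var out bytes.Buffer
        if err := json.Indent(&out, []byte(patch), "", "  "); err != nil {
            panic(err)
        }
        fmt.Println(out.String())
    }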
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240516 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240744 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240806 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240833 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.240851 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.261344 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.284783 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.303248 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224
dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\
"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.320771 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.343967 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.344031 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.344056 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.344089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.344112 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.353757 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:47Z\\\",\\\"message\\\":\\\" event handler 8\\\\nI1128 16:10:47.891297 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:10:47.891357 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 16:10:47.891406 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:10:47.891421 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 16:10:47.891448 6215 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:10:47.891484 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 16:10:47.891493 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:10:47.891562 6215 factory.go:656] Stopping watch factory\\\\nI1128 16:10:47.891480 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:10:47.891625 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 16:10:47.891622 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 16:10:47.892026 6215 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 16:10:47.892145 6215 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 16:10:47.892229 6215 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:10:47.892282 6215 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:10:47.892399 6215 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cd
d47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.378734 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.398221 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.417508 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.437333 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.447690 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.447735 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.447751 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.447772 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.447788 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.460177 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5
eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.476906 4909 
status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.486625 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9"] Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.487446 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.489797 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.490415 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.500003 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.500121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.500168 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mglcp\" (UniqueName: \"kubernetes.io/projected/d5a7347b-5536-45b5-be75-4bf0ed1b922b-kube-api-access-mglcp\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.500204 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.505439 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.530563 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.548424 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.550970 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.551028 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.551047 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.551069 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.551085 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.579090 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbea
b38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d013e334e47a79b960d2aaf673dc8bbd971dac4909a2b90d543cbbab0c279589\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:47Z\\\",\\\"message\\\":\\\" event handler 8\\\\nI1128 16:10:47.891297 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:10:47.891357 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 16:10:47.891406 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:10:47.891421 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 16:10:47.891448 6215 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:10:47.891484 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 16:10:47.891493 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:10:47.891562 6215 factory.go:656] Stopping watch factory\\\\nI1128 16:10:47.891480 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:10:47.891625 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 16:10:47.891622 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 16:10:47.892026 6215 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 16:10:47.892145 6215 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 16:10:47.892229 6215 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:10:47.892282 6215 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:10:47.892399 6215 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, 
Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z
brc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.598449 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.601006 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.602464 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.602776 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mglcp\" (UniqueName: \"kubernetes.io/projected/d5a7347b-5536-45b5-be75-4bf0ed1b922b-kube-api-access-mglcp\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.603013 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.602308 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.604178 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5a7347b-5536-45b5-be75-4bf0ed1b922b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 
16:10:50.609770 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5a7347b-5536-45b5-be75-4bf0ed1b922b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.624849 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.647068 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.658259 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mglcp\" (UniqueName: \"kubernetes.io/projected/d5a7347b-5536-45b5-be75-4bf0ed1b922b-kube-api-access-mglcp\") pod \"ovnkube-control-plane-749d76644c-8s8f9\" (UID: \"d5a7347b-5536-45b5-be75-4bf0ed1b922b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.669080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.669144 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.669168 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.669197 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.669219 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.689627 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.709950 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.722450 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.735485 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.748986 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.762594 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.771336 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.771376 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.771388 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.771402 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.771411 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.773859 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.785396 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.797505 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:50Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.811872 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.873532 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.873561 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.873571 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.873606 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.873615 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.976262 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.976303 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.976320 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.976339 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:50 crc kubenswrapper[4909]: I1128 16:10:50.976355 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:50Z","lastTransitionTime":"2025-11-28T16:10:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.078345 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.078376 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.078384 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.078396 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.078405 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.136192 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.136273 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.136299 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.136329 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.136352 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.154476 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 
2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.159724 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.159755 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.159766 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.159781 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.159792 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.173978 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 
2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.179868 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.179903 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.179915 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.179931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.179942 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.205294 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" event={"ID":"d5a7347b-5536-45b5-be75-4bf0ed1b922b","Type":"ContainerStarted","Data":"2df3f8d589dcb681c09791c520f54c2b8d0891138717921289c1c4047f2cf432"} Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.205444 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 
2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.209755 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/1.log" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.210605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.210667 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.210680 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.210697 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.210709 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.216874 4909 scope.go:117] "RemoveContainer" containerID="a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.217043 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.233124 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.242975 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.248166 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.248211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.248223 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.248244 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.248260 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.254299 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z"
Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.262853 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 
2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.263032 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.264829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.264867 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.264879 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.264904 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.264922 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.275724 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mount
Path\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.292806 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.308542 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.331826 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbea
b38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.352810 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.366421 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.367597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.367643 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.367695 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.367720 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.367738 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.386942 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.411090 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.429331 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.457180 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.470245 4909 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.470289 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.470305 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.470328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.470345 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.473151 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.491013 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.505323 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.573377 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.573434 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.573450 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.573473 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.573490 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.607831 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-8rjn2"] Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.608918 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.609022 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.612748 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.612830 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgql6\" (UniqueName: \"kubernetes.io/projected/ffceca0e-d9b5-484f-8753-5e0269eec811-kube-api-access-wgql6\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.629911 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.721991 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgql6\" (UniqueName: \"kubernetes.io/projected/ffceca0e-d9b5-484f-8753-5e0269eec811-kube-api-access-wgql6\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.722096 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.722257 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.722345 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:52.222321758 +0000 UTC m=+34.619006302 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725041 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725104 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725113 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.725634 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.743266 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbea
b38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.747626 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgql6\" (UniqueName: \"kubernetes.io/projected/ffceca0e-d9b5-484f-8753-5e0269eec811-kube-api-access-wgql6\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.757305 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.767258 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.776271 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.788347 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.800453 4909 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.812961 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.825593 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.827594 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.827624 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.827632 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.827647 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.827673 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.840909 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.855697 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.875003 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.893593 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.900883 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.900896 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.901094 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.900895 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.901298 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:51 crc kubenswrapper[4909]: E1128 16:10:51.901326 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.914427 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.928151 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:51Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.929852 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.929872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.929880 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.929892 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:51 crc kubenswrapper[4909]: I1128 16:10:51.929900 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:51Z","lastTransitionTime":"2025-11-28T16:10:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.033616 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.033767 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.033788 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.033813 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.033831 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.137693 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.137763 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.137788 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.137820 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.137844 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.226403 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.226562 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.226709 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:53.22663329 +0000 UTC m=+35.623317844 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.240219 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.240263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.240275 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.240291 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.240303 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.327009 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.327286 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:08.327248596 +0000 UTC m=+50.723933150 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.343165 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.343221 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.343240 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.343265 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.343282 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.428195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.428311 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428348 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428422 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:08.428402227 +0000 UTC m=+50.825086761 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.428356 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.428476 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428500 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428524 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428543 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428569 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428604 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:08.428582512 +0000 UTC m=+50.825267076 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428631 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:08.428618093 +0000 UTC m=+50.825302647 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428686 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428704 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428717 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.428746 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:08.428736616 +0000 UTC m=+50.825421150 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.449359 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.449425 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.449444 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.449470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.449487 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.552845 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.553206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.553224 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.553249 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.553272 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.656928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.656997 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.657015 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.657041 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.657060 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.759328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.759370 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.759389 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.759412 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.759430 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.862333 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.862397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.862414 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.862445 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.862463 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.901143 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:52 crc kubenswrapper[4909]: E1128 16:10:52.901293 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.965261 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.965320 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.965339 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.965362 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:52 crc kubenswrapper[4909]: I1128 16:10:52.965378 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:52Z","lastTransitionTime":"2025-11-28T16:10:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.068613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.068703 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.068723 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.068750 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.068769 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.171490 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.171529 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.171540 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.171559 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.171570 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.224370 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" event={"ID":"d5a7347b-5536-45b5-be75-4bf0ed1b922b","Type":"ContainerStarted","Data":"309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.224424 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" event={"ID":"d5a7347b-5536-45b5-be75-4bf0ed1b922b","Type":"ContainerStarted","Data":"75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.235171 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:53 crc kubenswrapper[4909]: E1128 16:10:53.235421 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:53 crc kubenswrapper[4909]: E1128 16:10:53.235529 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:55.235496395 +0000 UTC m=+37.632180959 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.244812 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.263866 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.277488 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.277569 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.277590 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.277611 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.277624 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.296297 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbea
b38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.313452 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.326728 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.348001 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.365218 4909 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.381099 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.381151 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.381164 
4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.381181 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.381192 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.385535 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.408403 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.425048 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.442530 4909 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.461410 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.481825 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.483618 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.483685 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.483697 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.483715 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.483749 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.500800 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.519020 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.542803 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.587139 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.587187 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.587204 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.587228 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.587247 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.689432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.689480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.689497 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.689517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.689534 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:53Z","lastTransitionTime":"2025-11-28T16:10:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
[repeated node-status records collapsed: kubelet_node_status.go:724 "Recording event message for node" events NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID and NodeNotReady, each followed by a setters.go:603 "Node became not ready" record (reason KubeletNotReady, "no CNI configuration file in /etc/kubernetes/cni/net.d/"), recur identically apart from their timestamps at roughly 100 ms intervals from 16:10:53.793 through 16:10:56.680; only the unique records from that window are kept below]
Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.900835 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.900908 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:10:53 crc kubenswrapper[4909]: E1128 16:10:53.901057 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:10:53 crc kubenswrapper[4909]: I1128 16:10:53.901120 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:10:53 crc kubenswrapper[4909]: E1128 16:10:53.901306 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:10:53 crc kubenswrapper[4909]: E1128 16:10:53.901437 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
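[Every failed status patch above is rejected for the same root cause: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 is serving a certificate that expired on 2025-08-24T17:21:41Z, months before the current time in the log. A minimal sketch for confirming the serving certificate's validity window from the node; it assumes only that Python and the cryptography package are available, and it takes the host, port, and expected expiry from the log itself:]

import socket, ssl
from cryptography import x509

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False       # not validating, just fetching the cert bytes
ctx.verify_mode = ssl.CERT_NONE  # verification would fail on the expired cert

with socket.create_connection(("127.0.0.1", 9743)) as sock:  # webhook address from the log
    with ctx.wrap_socket(sock) as tls:
        der = tls.getpeercert(binary_form=True)

cert = x509.load_der_x509_certificate(der)
print("notBefore:", cert.not_valid_before)
print("notAfter: ", cert.not_valid_after)  # expected: 2025-08-24 17:21:41 per the log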
Nov 28 16:10:54 crc kubenswrapper[4909]: I1128 16:10:54.901212 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:10:54 crc kubenswrapper[4909]: E1128 16:10:54.901413 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
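[Each "network is not ready" record above points at the same missing piece: the kubelet keeps reporting NetworkReady=false until a CNI config file shows up in /etc/kubernetes/cni/net.d/, and on this cluster that file is normally written by the multus / ovn-kubernetes pods once they are running. An illustrative check of the directory the kubelet names, a sketch rather than anything the cluster itself runs:]

import json, pathlib

cni_dir = pathlib.Path("/etc/kubernetes/cni/net.d")  # directory named in the log
confs = sorted(p for p in cni_dir.iterdir() if p.suffix in {".conf", ".conflist"}) if cni_dir.is_dir() else []
if not confs:
    print(f"no CNI configuration file in {cni_dir}/ - network plugin not ready")
else:
    for p in confs:
        data = json.loads(p.read_text())
        plugins = [pl.get("type") for pl in data.get("plugins", [])] or [data.get("type")]
        print(f"{p.name}: name={data.get('name')} plugins={plugins}")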
Nov 28 16:10:55 crc kubenswrapper[4909]: I1128 16:10:55.255371 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:10:55 crc kubenswrapper[4909]: E1128 16:10:55.255610 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 16:10:55 crc kubenswrapper[4909]: E1128 16:10:55.255735 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:10:59.255712506 +0000 UTC m=+41.652397040 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered
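[The nestedpendingoperations record above pushes the next mount attempt 4 s out (durationBeforeRetry 4s). A back-of-envelope sketch under the assumption, not stated in the log, that the kubelet retries failed volume mounts with exponential backoff starting at 500 ms and doubling per consecutive failure; 4 s would then correspond to the fourth consecutive failure for this volume:]

base = 0.5  # assumed initial backoff in seconds
for failures in range(1, 9):
    delay = base * 2 ** (failures - 1)
    print(f"consecutive failure #{failures}: durationBeforeRetry {delay:g}s")
# consecutive failure #4: durationBeforeRetry 4s  <- matches the record above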
Nov 28 16:10:55 crc kubenswrapper[4909]: I1128 16:10:55.901211 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:10:55 crc kubenswrapper[4909]: E1128 16:10:55.901327 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:10:55 crc kubenswrapper[4909]: I1128 16:10:55.901215 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:10:55 crc kubenswrapper[4909]: I1128 16:10:55.901211 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:10:55 crc kubenswrapper[4909]: E1128 16:10:55.901394 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:10:55 crc kubenswrapper[4909]: E1128 16:10:55.901603 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.165052 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.165124 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.165147 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.165178 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.165199 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.268117 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.268176 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.268194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.268219 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.268239 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.371712 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.371751 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.371760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.371775 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.371784 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.474184 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.474225 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.474232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.474247 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.474256 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.577045 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.577128 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.577152 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.577189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.577213 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.680184 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.680258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.680284 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.680313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.680336 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.783824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.783890 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.783909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.783933 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.783955 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.887932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.888000 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.888017 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.888161 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.888186 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.901239 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:56 crc kubenswrapper[4909]: E1128 16:10:56.901430 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.990690 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.990757 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.990775 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.990800 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:56 crc kubenswrapper[4909]: I1128 16:10:56.990821 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:56Z","lastTransitionTime":"2025-11-28T16:10:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.094387 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.094439 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.094456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.094478 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.094495 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.197545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.197610 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.197632 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.197713 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.197739 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.300894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.300957 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.300974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.300999 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.301017 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.403386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.403459 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.403480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.403510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.403531 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.506587 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.506640 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.506700 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.506745 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.506764 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.610098 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.610194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.610223 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.610257 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.610280 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.713411 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.713494 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.713516 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.713546 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.713571 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.816734 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.816783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.816801 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.816824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.816841 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.901360 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:57 crc kubenswrapper[4909]: E1128 16:10:57.901540 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.901610 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.901729 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:57 crc kubenswrapper[4909]: E1128 16:10:57.901759 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:57 crc kubenswrapper[4909]: E1128 16:10:57.901869 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.919796 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.919912 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.919969 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.919996 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.920014 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:57Z","lastTransitionTime":"2025-11-28T16:10:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.922938 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.938633 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.955871 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startT
ime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 16:10:57.977802 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4
f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:57 crc kubenswrapper[4909]: I1128 
16:10:57.997960 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"
containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.017268 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.022406 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.022466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.022484 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.022509 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.022527 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.039319 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.060130 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.079628 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.097018 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.125859 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.126091 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.126230 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.126399 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.126543 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.134580 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbea
b38fa1072ee5f65dc1b256f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.147756 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.163899 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.178527 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.196588 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.218800 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:10:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.231125 4909 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.231181 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.231200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.231229 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.231248 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.334296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.334346 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.334363 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.334391 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.334415 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.438159 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.438241 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.438261 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.438298 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.438322 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.542119 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.542470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.542605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.542786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.542914 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.646502 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.646776 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.646938 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.647084 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.647216 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.750388 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.750448 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.750466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.750698 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.750719 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.853872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.854405 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.855550 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.855974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.856592 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.901245 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:58 crc kubenswrapper[4909]: E1128 16:10:58.901465 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.960150 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.960218 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.960239 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.960266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:58 crc kubenswrapper[4909]: I1128 16:10:58.960284 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:58Z","lastTransitionTime":"2025-11-28T16:10:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.063509 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.063865 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.064046 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.064196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.064338 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.168148 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.168216 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.168234 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.168260 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.168278 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.271363 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.271421 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.271439 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.271463 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.271485 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.301599 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:10:59 crc kubenswrapper[4909]: E1128 16:10:59.301878 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:59 crc kubenswrapper[4909]: E1128 16:10:59.301995 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:07.301962603 +0000 UTC m=+49.698647167 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.375232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.375294 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.375313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.375337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.375354 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.478517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.478579 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.478598 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.478623 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.478642 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.581127 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.581189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.581205 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.581227 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.581242 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.684315 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.684608 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.685023 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.685386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.685804 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.788595 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.788692 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.788709 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.788730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.788746 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.891817 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.892235 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.892375 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.892465 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.892552 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.901290 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:10:59 crc kubenswrapper[4909]: E1128 16:10:59.901504 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.901887 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.901949 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:10:59 crc kubenswrapper[4909]: E1128 16:10:59.902021 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:10:59 crc kubenswrapper[4909]: E1128 16:10:59.902115 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.996199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.996256 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.996275 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.996298 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:10:59 crc kubenswrapper[4909]: I1128 16:10:59.996315 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:10:59Z","lastTransitionTime":"2025-11-28T16:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.099213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.099292 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.099314 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.099340 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.099359 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.202374 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.202416 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.202427 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.202442 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.202456 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.305466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.305527 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.305545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.305573 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.305594 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.409291 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.409343 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.409357 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.409376 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.409418 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.512349 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.512415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.512435 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.512463 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.512482 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.616736 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.617036 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.617189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.617337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.617456 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.720514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.720580 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.720626 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.720685 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.720705 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.823353 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.823408 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.823432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.823460 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.823480 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.901291 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:00 crc kubenswrapper[4909]: E1128 16:11:00.901456 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.926830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.927139 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.927303 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.927441 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:00 crc kubenswrapper[4909]: I1128 16:11:00.927599 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:00Z","lastTransitionTime":"2025-11-28T16:11:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.030025 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.030303 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.030446 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.030555 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.030694 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.133429 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.133729 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.133845 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.133934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.134016 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.237502 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.237835 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.238023 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.238168 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.238314 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.341517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.341614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.341635 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.341679 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.341697 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.444397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.444774 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.444949 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.445114 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.445330 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.505486 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.505559 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.505582 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.505609 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.505631 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.526132 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:01Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.532767 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.532824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
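[editorial example] The status patch above fails because the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24, months before the logged time. The Go sketch below is one way to confirm that diagnosis from the node: it dials the endpoint with verification disabled purely to inspect the served certificate's validity window. Only the address comes from the log; the rest is an illustrative diagnostic, not an OpenShift tool.

    // Diagnostic sketch for the x509 "certificate has expired" error above.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // inspect the cert; do not trust it
        })
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Printf("subject:   %s\nnotBefore: %s\nnotAfter:  %s\nexpired:   %v\n",
            cert.Subject, cert.NotBefore, cert.NotAfter,
            time.Now().After(cert.NotAfter))
    }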
event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.532843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.532874 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.532891 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.556186 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:01Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.560875 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.561147 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.561297 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.561456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.561606 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.582249 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:01Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.587683 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.588137 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.588350 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.588572 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.609006 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:01Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.614388 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.614451 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.614468 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.614495 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.614513 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.632068 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:01Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.632292 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.633931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
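[editor's aside] The three failed patch attempts above share one root cause: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 is serving a certificate that expired 2025-08-24T17:21:41Z, while the node clock reads 2025-11-28. A minimal Go sketch for confirming this from the host follows; it is hypothetical diagnostic code, not part of the kubelet or of OpenShift tooling, and only dials the address quoted in the log to print the served certificate's validity window.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint taken from the log line: Post "https://127.0.0.1:9743/node?timeout=10s".
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect the certificate without trusting it
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%v notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			time.Now().After(cert.NotAfter))
	}
}

If the printed notAfter is indeed in the past, the fix implied by the error text is rotating that serving certificate (or letting the cluster's own certificate rotation catch up); the sketch only confirms which side is stale.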
event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.633982 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.633999 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.634021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.634038 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.736612 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.736693 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.736718 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.736742 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.736758 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.840145 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.840206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.840239 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.840264 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.840281 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.901174 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.901267 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.901372 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.901576 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.902813 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:01 crc kubenswrapper[4909]: E1128 16:11:01.903065 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.943853 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.943930 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.943953 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.943978 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:01 crc kubenswrapper[4909]: I1128 16:11:01.943996 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:01Z","lastTransitionTime":"2025-11-28T16:11:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
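[editor's aside] Every NotReady heartbeat and every "Error syncing pod" above carries the same message: no CNI configuration file in /etc/kubernetes/cni/net.d/. The kubelet keeps polling the runtime's network readiness and reports NetworkReady=false until a CNI config appears in that directory. A toy approximation of that existence check follows (assumed file patterns; not the kubelet's actual implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	var found []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			continue // only possible for a malformed pattern, not for these literals
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Fprintln(os.Stderr, "NetworkReady=false: no CNI configuration file found in", confDir)
		os.Exit(1)
	}
	fmt.Println("CNI configs present:", found)
}

An empty result here matches the log's state: the network operator (OVN-Kubernetes on this cluster) has not yet written its config, which in turn is blocked by the expired webhook certificate noted earlier.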
Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.046581 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.046645 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.046696 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.046725 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.046741 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.149123 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.149522 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.149606 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.149732 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.149817 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.253199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.253237 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.253247 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.253263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.253274 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.355995 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.356107 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.356127 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.356188 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.356206 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.459078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.459120 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.459154 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.459176 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.459185 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.561580 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.561645 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.561716 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.561745 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.561766 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.665197 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.665262 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.665277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.665301 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.665317 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.768470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.768582 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.768604 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.768631 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.768687 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.872150 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.872263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.872282 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.872306 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.872323 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
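[editor's aside] The condition={...} blob that setters.go logs on each heartbeat is the node's Ready condition serialized as JSON; its keys match the Kubernetes NodeCondition fields. A small sketch that parses one such payload (struct fields inferred from the log text itself, not taken from the k8s.io/api package):

package main

import (
	"encoding/json"
	"fmt"
)

// NodeCondition mirrors the fields visible in the logged condition={...} JSON.
type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s (%s)\n", c.Type, c.Status, c.LastTransitionTime, c.Reason)
}

Note that the failed patches earlier carry these same conditions inside a strategic merge patch (the $setElementOrder/conditions directive preserves list order when merging by the "type" key), so none of these NotReady transitions ever reached the API server.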
Has your network provider started?"} Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.900930 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:02 crc kubenswrapper[4909]: E1128 16:11:02.901532 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.977429 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.977809 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.977980 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.978214 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:02 crc kubenswrapper[4909]: I1128 16:11:02.978547 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:02Z","lastTransitionTime":"2025-11-28T16:11:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.082053 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.082386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.082546 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.082728 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.082890 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:03Z","lastTransitionTime":"2025-11-28T16:11:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[identical node-status blocks ("Recording event message for node" for NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, followed by the same "Node became not ready" condition) re-logged at ~100 ms intervals from 16:11:03.082 through 16:11:03.803; only the timestamps change]
Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.901331 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.901337 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:03 crc kubenswrapper[4909]: I1128 16:11:03.901430 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:03 crc kubenswrapper[4909]: E1128 16:11:03.902767 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:03 crc kubenswrapper[4909]: E1128 16:11:03.902758 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:03 crc kubenswrapper[4909]: E1128 16:11:03.903018 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[node-status block re-logged at 16:11:03.906 and 16:11:04.011]
[node-status blocks re-logged at ~100 ms intervals from 16:11:04.113 through 16:11:04.833; only the timestamps change]
Nov 28 16:11:04 crc kubenswrapper[4909]: I1128 16:11:04.901573 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:04 crc kubenswrapper[4909]: E1128 16:11:04.901907 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
[node-status block re-logged at 16:11:04.937 and 16:11:05.040]
[node-status blocks re-logged at ~100 ms intervals from 16:11:05.143 through 16:11:05.863; only the timestamps change]
Nov 28 16:11:05 crc kubenswrapper[4909]: I1128 16:11:05.901387 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:05 crc kubenswrapper[4909]: I1128 16:11:05.901471 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:05 crc kubenswrapper[4909]: E1128 16:11:05.901517 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:05 crc kubenswrapper[4909]: I1128 16:11:05.901570 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:05 crc kubenswrapper[4909]: E1128 16:11:05.901707 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:05 crc kubenswrapper[4909]: E1128 16:11:05.901821 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:05 crc kubenswrapper[4909]: I1128 16:11:05.902839 4909 scope.go:117] "RemoveContainer" containerID="a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1"
[node-status block re-logged at 16:11:05.966 and 16:11:06.070]
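
The condition={...} payload that setters.go logs in the blocks above is plain JSON, so it can be pulled apart mechanically when triaging. A hedged sketch follows, using a hand-rolled struct rather than the real k8s.io/api NodeCondition type; the sample payload is the one logged at 16:11:05.

// condparse.go - sketch: decode a logged node condition to extract
// reason/message; the struct below is an assumption, not k8s.io/api.
package main

import (
	"encoding/json"
	"fmt"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied from the setters.go entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:05Z","lastTransitionTime":"2025-11-28T16:11:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	// Prints: Ready=False since 2025-11-28T16:11:05Z: KubeletNotReady
	fmt.Printf("%s=%s since %s: %s\n", c.Type, c.Status, c.LastTransitionTime, c.Reason)
}
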
[node-status block re-logged at 16:11:06.174]
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.275164 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/1.log"
[node-status block re-logged at 16:11:06.283]
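
Every entry in this log follows the same shape: a syslog prefix (timestamp, host, unit[pid]) followed by a klog header (severity letter, MMDD, wall-clock time, PID, file:line) before the quoted message. A sketch of splitting one such line follows; the regex is my assumption for triage scripting, not the kubelet's own formatter.

// logsplit.go - sketch: split a kubenswrapper journal line into its syslog
// prefix and klog header fields. The regex is an assumption, not kubelet code.
package main

import (
	"fmt"
	"regexp"
)

// groups: 1 syslog time, 2 host, 3 unit, 4 pid, 5 severity, 6 MMDD,
// 7 klog time, 8 pid again, 9 file:line, 10 message
var klogLine = regexp.MustCompile(
	`^(\w{3} +\d+ [\d:]+) (\S+) (\S+)\[(\d+)\]: ([IWEF])(\d{4}) ([\d:.]+) +(\d+) ([\w.]+:\d+)\] (.*)$`)

func main() {
	// Sample entry copied from the log below.
	line := `Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.288717 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94"`
	m := klogLine.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("severity=%s file=%s msg=%s\n", m[5], m[9], m[10])
}
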
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.288107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902"}
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.288717 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.314547 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.330038 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.350443 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
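
The status-patch failures above and below are all the same TLS failure: the webhook's serving certificate has NotAfter 2025-08-24T17:21:41Z, which is before the node's current time, so x509 verification rejects it. A minimal stdlib Go sketch of that validity-window check follows; the certificate path is hypothetical, not taken from the log.

// certcheck.go - sketch of the validity-window test that x509 verification
// performs: reject the certificate when time.Now() is after NotAfter.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/tmp/webhook-serving.crt") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if now.After(cert.NotAfter) {
		// Same condition the handshake reports in the surrounding entries:
		// "certificate has expired or is not yet valid".
		fmt.Printf("expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		os.Exit(1)
	}
	fmt.Println("certificate valid until", cert.NotAfter)
}
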
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.367364 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386013 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386051 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386077 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386088 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.386725 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.402909 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.414752 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.426550 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.446603 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.475294 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.487790 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.487828 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.487836 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.487848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.487856 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.499118 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3
e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\
":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.508050 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.520048 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.534517 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.544430 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.560293 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.589648 4909 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.589703 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.589713 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.589730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.589742 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.692006 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.692035 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.692043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.692055 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.692064 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.794315 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.794344 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.794351 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.794364 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.794375 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.897009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.897063 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.897082 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.897105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.897121 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.901393 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:06 crc kubenswrapper[4909]: E1128 16:11:06.901633 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.999896 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.999947 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4909]: I1128 16:11:06.999965 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:06.999988 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.000007 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:06Z","lastTransitionTime":"2025-11-28T16:11:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.103021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.103076 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.103093 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.103117 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.103134 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.159496 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.170689 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.177749 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.191866 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\
\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.205131 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.205179 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.205200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.205217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.205231 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.222432 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3
e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\
":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.240345 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.252008 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.268889 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.286083 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.293027 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/2.log" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.294007 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/1.log" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.297918 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" exitCode=1 Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.298019 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.298088 4909 scope.go:117] "RemoveContainer" containerID="a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.299380 4909 scope.go:117] "RemoveContainer" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.299627 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.300412 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.307058 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.307345 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.307359 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.307380 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.307395 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.318932 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.332060 4909 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.345478 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.363676 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.381489 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.393276 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.393726 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.393771 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:23.393757108 +0000 UTC m=+65.790441632 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.399306 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409453 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409485 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409494 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409506 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409514 4909 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.409686 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.421015 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.434914 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.448187 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.459862 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.473604 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.492627 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.506573 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.511166 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.511194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.511206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.511222 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.511233 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.518215 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.530031 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.546583 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.562007 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92eda
f5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.579931 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.595126 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.614081 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.614150 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.614173 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.614202 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.614224 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.616383 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.636052 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sh
a256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.652420 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.679831 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.697954 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.716798 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.716860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.716879 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.716906 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.716927 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.820475 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.820521 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.820537 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.820559 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.820576 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.900755 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.900862 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.900946 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.901082 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.901293 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:07 crc kubenswrapper[4909]: E1128 16:11:07.901466 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.923778 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.923856 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.923881 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.923909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.923932 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:07Z","lastTransitionTime":"2025-11-28T16:11:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.927494 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.941452 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.956347 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.973434 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:07 crc kubenswrapper[4909]: I1128 16:11:07.990973 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.011269 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.026383 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.026682 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.026951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.027200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.027333 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.029207 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.046391 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.062509 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.076938 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"
}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.104240 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\
\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"
kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a256af2a77f3591aaed9ae0f6c9ef60c2aedbbeab38fa1072ee5f65dc1b256f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:10:49Z\\\",\\\"message\\\":\\\"ource:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:10:48.981466 6343 services_controller.go:452] Built service openshift-apiserver/api per-node LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981482 6343 services_controller.go:453] Built service openshift-apiserver/api template LB for network=default: []services.LB{}\\\\nI1128 16:10:48.981434 6343 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"8b82f026-5975-4a1b-bb18-08d5d51147ec\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: 
[]services.LB{services.LB{Name:\\\\\\\"Service_openshift-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.o\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.124490 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containe
rID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.130067 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.130128 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.130144 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.130167 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.130184 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.139362 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.153230 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.169319 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.187600 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.205219 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.232580 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.232639 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.232682 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.232706 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.232724 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.303995 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/2.log" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.309285 4909 scope.go:117] "RemoveContainer" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.309512 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.328849 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.340812 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.340898 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.340923 4909 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.340952 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.340975 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.342604 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.356916 4909 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.371189 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.386560 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.402830 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.404192 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.405027 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:40.404377352 +0000 UTC m=+82.801061916 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.421087 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.439287 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.443824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.443888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.443913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.443944 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.443968 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.458526 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.473431 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.491390 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.506401 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.506499 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.506526 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.506546 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.506576 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506645 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506701 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:40.506689045 +0000 UTC m=+82.903373569 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506738 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506781 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506800 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506839 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506876 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506893 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:40.50686352 +0000 UTC m=+82.903548084 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506896 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506976 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:40.506952053 +0000 UTC m=+82.903636607 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.506900 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.507028 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:40.507019515 +0000 UTC m=+82.903704039 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.518283 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler
-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.537422 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.546070 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.546127 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.546149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.546178 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.546199 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.548828 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.559226 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.573083 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.648155 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.648290 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc 
kubenswrapper[4909]: I1128 16:11:08.648305 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.648323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.648334 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.750756 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.750818 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.750841 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.750870 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.750889 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.852647 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.852696 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.852704 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.852717 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.852726 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.900421 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:08 crc kubenswrapper[4909]: E1128 16:11:08.900528 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.956237 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.956277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.956296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.956318 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:08 crc kubenswrapper[4909]: I1128 16:11:08.956336 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:08Z","lastTransitionTime":"2025-11-28T16:11:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.059011 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.059068 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.059080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.059098 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.059114 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.162099 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.162156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.162174 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.162198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.162216 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.264452 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.264504 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.264521 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.264543 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.264562 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.367107 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.367173 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.367199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.367226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.367250 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.469830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.469894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.469914 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.469937 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.469955 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.573321 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.573427 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.573446 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.573534 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.573615 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.676204 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.676258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.676274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.676297 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.676314 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.779948 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.780009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.780025 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.780047 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.780063 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.883040 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.883115 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.883138 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.883165 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.883182 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.900964 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.900989 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:09 crc kubenswrapper[4909]: E1128 16:11:09.901152 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.901224 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:09 crc kubenswrapper[4909]: E1128 16:11:09.901400 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:09 crc kubenswrapper[4909]: E1128 16:11:09.901548 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.985850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.985910 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.985926 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.985947 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:09 crc kubenswrapper[4909]: I1128 16:11:09.985964 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:09Z","lastTransitionTime":"2025-11-28T16:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.089226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.089285 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.089306 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.089337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.089378 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.191852 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.191917 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.191934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.191959 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.191976 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.294997 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.295050 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.295071 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.295098 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.295114 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.397448 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.397500 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.397517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.397541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.397559 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.500481 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.500537 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.500553 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.500581 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.500598 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.603755 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.604073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.604266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.604446 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.604596 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.707726 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.707905 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.708080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.708248 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.708386 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.811288 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.811908 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.812032 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.812138 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.812243 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.900936 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:10 crc kubenswrapper[4909]: E1128 16:11:10.901152 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.915219 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.915270 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.915293 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.915318 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4909]: I1128 16:11:10.915338 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.018138 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.018210 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.018232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.018261 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.018282 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.121931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.121996 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.122009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.122028 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.122040 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.224313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.224364 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.224378 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.224397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.224411 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.326492 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.326549 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.326572 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.326635 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.326685 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.429850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.429906 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.429924 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.429947 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.429966 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.534410 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.537337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.537538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.537730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.537893 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.640722 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.641011 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.641128 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.641253 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.641366 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.744617 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.745126 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.745324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.745517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.745729 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.848488 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.848518 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.848526 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.848542 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.848551 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.901158 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.901370 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.901226 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:11 crc kubenswrapper[4909]: E1128 16:11:11.901580 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:11 crc kubenswrapper[4909]: E1128 16:11:11.901762 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:11 crc kubenswrapper[4909]: E1128 16:11:11.901949 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.947873 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.947946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.947968 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.947990 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.948007 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: E1128 16:11:11.966271 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:11Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.971411 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.971459 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.971476 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.971497 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.971513 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4909]: E1128 16:11:11.991362 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:11Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.997318 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.997373 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.997390 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.997415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4909]: I1128 16:11:11.997433 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: E1128 16:11:12.018048 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.024510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.024562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.024575 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.024593 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.024605 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: E1128 16:11:12.044584 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.049245 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.049322 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.049346 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.049379 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.049401 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: E1128 16:11:12.072010 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:12 crc kubenswrapper[4909]: E1128 16:11:12.072213 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.074838 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.074884 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.074896 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.074916 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.074932 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.178249 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.178317 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.178335 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.178359 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.178376 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.280931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.281047 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.281070 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.281136 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.281153 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.384650 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.384710 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.384730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.384751 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.384767 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.487730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.487805 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.487829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.487857 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.487880 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.590740 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.590802 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.590825 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.590855 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.590878 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.694280 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.694341 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.694367 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.694395 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.694416 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.800462 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.800544 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.800571 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.800601 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.800632 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.901538 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:12 crc kubenswrapper[4909]: E1128 16:11:12.901746 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.903685 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.903721 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.903740 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.903761 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4909]: I1128 16:11:12.903778 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.005900 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.005958 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.005974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.005997 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.006015 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.109823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.109876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.109894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.109921 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.109939 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.212962 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.213048 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.213070 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.213094 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.213110 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.316057 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.316119 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.316135 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.316157 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.316175 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.419188 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.419243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.419260 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.419287 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.419303 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.522458 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.522492 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.522499 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.522514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.522523 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.626092 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.626153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.626170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.626198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.626216 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.728991 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.729055 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.729078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.729105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.729126 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.831851 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.831909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.831923 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.831944 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.831959 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.900539 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.900589 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.900612 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:13 crc kubenswrapper[4909]: E1128 16:11:13.900802 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:13 crc kubenswrapper[4909]: E1128 16:11:13.900916 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:13 crc kubenswrapper[4909]: E1128 16:11:13.900979 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.934723 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.934801 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.934824 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.934851 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4909]: I1128 16:11:13.934876 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.038324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.038392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.038408 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.038431 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.038447 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.141617 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.141743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.141767 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.141792 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.141809 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.245100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.245158 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.245175 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.245202 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.245219 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.348196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.348280 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.348299 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.348324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.348343 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.451198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.451258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.451281 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.451308 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.451329 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.554347 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.554429 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.554453 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.554486 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.554518 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.657077 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.657135 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.657153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.657175 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.657193 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.759731 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.759775 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.759786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.759800 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.759814 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.862737 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.862799 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.862815 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.862837 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.862854 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.900498 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:14 crc kubenswrapper[4909]: E1128 16:11:14.900759 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.965260 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.965334 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.965357 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.965386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4909]: I1128 16:11:14.965408 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.068601 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.068759 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.068848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.068946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.069044 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.171587 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.171649 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.171705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.171733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.171753 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.274226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.274274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.274291 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.274312 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.274329 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.377185 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.377217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.377226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.377239 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.377248 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.480100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.480160 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.480177 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.480201 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.480217 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.583096 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.583180 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.583206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.583235 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.583258 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.686365 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.686426 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.686448 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.686491 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.686512 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.788892 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.788938 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.788956 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.788975 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.788986 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.891062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.891100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.891111 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.891126 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.891138 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.900710 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.900742 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:15 crc kubenswrapper[4909]: E1128 16:11:15.900849 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.900872 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:15 crc kubenswrapper[4909]: E1128 16:11:15.900966 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:15 crc kubenswrapper[4909]: E1128 16:11:15.901024 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.993875 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.993930 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.993947 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.993971 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4909]: I1128 16:11:15.993987 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.096631 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.096888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.097036 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.097202 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.097345 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.199948 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.200004 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.200021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.200047 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.200064 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.302423 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.302478 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.302491 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.302510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.302521 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.405114 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.405174 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.405193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.405216 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.405232 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.508052 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.508110 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.508127 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.508150 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.508167 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.611145 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.611205 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.611222 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.611246 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.611263 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.714354 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.714422 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.714441 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.714467 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.714485 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.817017 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.817073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.817090 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.817114 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.817130 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.901457 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:16 crc kubenswrapper[4909]: E1128 16:11:16.901703 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.920614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.920740 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.920760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.920783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4909]: I1128 16:11:16.920801 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.023928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.024804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.024848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.024872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.024891 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.128049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.128113 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.128132 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.128156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.128175 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.231491 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.231551 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.231570 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.231600 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.231628 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.334794 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.334857 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.334876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.334898 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.334915 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.438407 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.438475 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.438499 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.438529 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.438553 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.541809 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.542166 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.542350 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.542561 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.542796 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.645554 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.645597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.645609 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.645629 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.645642 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.748458 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.748780 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.748879 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.749014 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.749366 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.853327 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.853385 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.853401 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.853425 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.853446 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.900576 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.900599 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.900742 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:17 crc kubenswrapper[4909]: E1128 16:11:17.901148 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:17 crc kubenswrapper[4909]: E1128 16:11:17.901318 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:17 crc kubenswrapper[4909]: E1128 16:11:17.901470 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.930593 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.949211 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.956159 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.956200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.956211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.956228 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.956240 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.963685 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.979482 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4909]: I1128 16:11:17.996114 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.014423 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.030097 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.048763 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.061480 4909 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.061513 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.061524 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.061542 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.061557 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.063982 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.078351 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.096840 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.110779 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.125699 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.143211 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.162875 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.164361 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.164511 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.164536 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.164561 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.164612 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.181579 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.198797 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.267317 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.267714 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.267809 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.267906 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.267983 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.370288 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.370348 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.370367 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.370391 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.370410 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.474040 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.474105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.474122 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.474149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.474174 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.576829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.576880 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.576905 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.576934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.576956 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.680352 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.680415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.680437 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.680465 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.680491 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.783594 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.784043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.784226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.784370 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.784510 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.888059 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.888808 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.889032 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.889432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.889596 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.901385 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:18 crc kubenswrapper[4909]: E1128 16:11:18.901590 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.910602 4909 scope.go:117] "RemoveContainer" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" Nov 28 16:11:18 crc kubenswrapper[4909]: E1128 16:11:18.910937 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.992455 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.992520 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.992542 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.992572 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4909]: I1128 16:11:18.992594 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.096160 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.096244 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.096269 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.096299 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.096322 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.199491 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.199531 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.199541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.199557 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.199568 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.302532 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.302913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.303016 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.303087 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.303161 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.406099 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.406324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.406429 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.406497 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.406556 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.509559 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.509929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.510065 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.510198 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.510313 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.614468 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.614848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.615026 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.615152 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.615295 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.718028 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.718082 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.718101 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.718127 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.718144 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.821388 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.821435 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.821454 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.821480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.821497 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.905218 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:19 crc kubenswrapper[4909]: E1128 16:11:19.905377 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.905627 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:19 crc kubenswrapper[4909]: E1128 16:11:19.905749 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.905953 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:19 crc kubenswrapper[4909]: E1128 16:11:19.906042 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.923825 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.923866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.923882 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.923905 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4909]: I1128 16:11:19.923922 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.026337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.026379 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.026395 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.026416 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.026434 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.129836 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.130351 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.130418 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.130482 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.130541 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.232882 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.232911 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.232919 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.232932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.232941 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.335393 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.335421 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.335448 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.335461 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.335470 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.438439 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.438465 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.438472 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.438485 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.438494 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.541286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.541317 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.541327 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.541342 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.541355 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.644041 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.644251 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.644316 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.644393 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.644455 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.748043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.748154 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.748215 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.748240 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.748296 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.851008 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.851067 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.851079 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.851096 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.851109 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.900944 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:20 crc kubenswrapper[4909]: E1128 16:11:20.901140 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.954052 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.954106 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.954119 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.954138 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4909]: I1128 16:11:20.954150 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.056715 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.056782 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.056801 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.056826 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.056843 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.159350 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.159414 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.159430 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.159453 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.159473 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.665086 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.665153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.665170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.665190 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.665205 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.768882 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.768933 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.768946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.768964 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.768978 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.871768 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.871823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.871843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.871866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.871883 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.901399 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.901546 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:21 crc kubenswrapper[4909]: E1128 16:11:21.901695 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.901411 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:21 crc kubenswrapper[4909]: E1128 16:11:21.902008 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:21 crc kubenswrapper[4909]: E1128 16:11:21.902126 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.974886 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.974920 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.974928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.974944 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4909]: I1128 16:11:21.974952 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.076904 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.076946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.076955 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.076968 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.076977 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.179606 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.180401 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.180555 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.180689 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.180934 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.283585 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.283644 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.283702 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.283734 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.283756 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.386690 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.386909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.387859 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.388428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.389024 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.451034 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.451069 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.451080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.451095 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.451105 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.465078 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:22Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.468933 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.468964 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.468974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.468986 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.468996 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.487819 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:22Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.492148 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.492184 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.492194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.492207 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.492217 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.509810 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:22Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.514712 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.514958 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.515149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.515341 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.515527 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.533378 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:22Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.538105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.538154 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.538169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.538189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.538207 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.555905 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:22Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.556072 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.557455 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.557486 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.557498 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.557510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.557520 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.660156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.660215 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.660232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.660257 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.660278 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.762625 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.762685 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.762700 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.762719 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.762733 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.864847 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.864942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.864968 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.865001 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.865026 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.901104 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:22 crc kubenswrapper[4909]: E1128 16:11:22.901281 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.967456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.967489 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.967500 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.967513 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:22 crc kubenswrapper[4909]: I1128 16:11:22.967522 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.069889 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.069945 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.069963 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.069988 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.070006 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.177172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.177248 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.177339 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.177355 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.177369 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.280784 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.280827 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.280840 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.280858 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.280870 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.383308 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.383374 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.383391 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.383415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.383431 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.484480 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:23 crc kubenswrapper[4909]: E1128 16:11:23.484676 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:23 crc kubenswrapper[4909]: E1128 16:11:23.484974 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:55.484953578 +0000 UTC m=+97.881638112 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.486230 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.486279 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.486296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.486320 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.486336 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.589529 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.589586 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.589604 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.589631 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.589650 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.692540 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.692616 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.692628 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.692643 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.692667 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.795138 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.795169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.795177 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.795189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.795199 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.897764 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.897825 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.897839 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.897861 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.897874 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.901470 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.901524 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:23 crc kubenswrapper[4909]: I1128 16:11:23.901537 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:23 crc kubenswrapper[4909]: E1128 16:11:23.901605 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:23 crc kubenswrapper[4909]: E1128 16:11:23.901686 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:23 crc kubenswrapper[4909]: E1128 16:11:23.901830 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.000902 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.000954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.000970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.000992 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.001008 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.103729 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.103791 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.103801 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.103816 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.103833 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.205970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.206013 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.206027 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.206043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.206056 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.308013 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.308057 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.308068 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.308092 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.308125 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.410942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.411001 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.411020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.411045 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.411061 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.514718 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.514766 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.514783 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.514804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.514872 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.617502 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.617534 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.617543 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.617575 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.617600 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.720714 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.720977 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.720989 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.721009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.721022 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.824119 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.824172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.824189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.824212 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.824227 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.900420 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:24 crc kubenswrapper[4909]: E1128 16:11:24.900532 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.927754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.927808 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.927822 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.927843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4909]: I1128 16:11:24.927858 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.030199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.030232 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.030241 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.030254 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.030262 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.132909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.132941 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.132950 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.132961 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.132971 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.236466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.236505 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.236514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.236526 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.236537 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.338888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.338945 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.338954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.338970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.338980 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.442440 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.442503 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.442520 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.442545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.442562 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.544704 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.544759 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.544768 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.544784 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.544795 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.646970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.647008 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.647021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.647036 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.647047 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.679766 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/0.log" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.679831 4909 generic.go:334] "Generic (PLEG): container finished" podID="6e3805b2-8ad3-4fa6-b88f-e0ae42294202" containerID="0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4" exitCode=1 Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.679867 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerDied","Data":"0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.680267 4909 scope.go:117] "RemoveContainer" containerID="0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.705525 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"1
92.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.726788 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube
-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.743007 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.748807 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.748839 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.748850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.748867 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.748878 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.760605 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.781632 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.798735 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.811193 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.840880 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.851243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.851275 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.851286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.851302 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.851315 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.852586 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.865421 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.881437 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.900744 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.900823 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.900872 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:25 crc kubenswrapper[4909]: E1128 16:11:25.901020 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:25 crc kubenswrapper[4909]: E1128 16:11:25.901254 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:25 crc kubenswrapper[4909]: E1128 16:11:25.901380 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.901767 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.917805 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.927013 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.938749 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.950324 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.953687 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.953719 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.953732 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.953748 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.953759 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4909]: I1128 16:11:25.960383 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.056669 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.056707 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.056716 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.056731 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.056740 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.158794 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.158830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.158839 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.158852 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.158861 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.260684 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.260743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.260760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.260787 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.260808 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.363243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.363286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.363297 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.363312 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.363322 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.466306 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.466353 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.466369 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.466392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.466413 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.569346 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.569389 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.569401 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.569415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.569425 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.672870 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.672934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.672951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.672975 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.672995 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.685230 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/0.log" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.685304 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerStarted","Data":"e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.700177 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.716298 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.740047 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.755167 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.766642 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.774645 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.774684 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.774694 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.774707 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.774715 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.786140 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f
8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/e
tc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitC
ode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.797839 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.811853 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.825153 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.839044 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.849227 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.863796 4909 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.877509 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.877569 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.877587 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.877612 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.877629 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.883151 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.900639 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.900599 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource
-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: E1128 16:11:26.900760 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.916162 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.931633 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.946954 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.980183 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.980235 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.980243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.980257 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4909]: I1128 16:11:26.980266 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.082383 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.082426 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.082438 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.082455 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.082467 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.185057 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.185103 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.185114 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.185128 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.185139 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.287522 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.287588 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.287597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.287612 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.287624 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.390891 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.391776 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.391850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.391879 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.391902 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.495020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.495073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.495093 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.495122 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.495147 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.597735 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.597813 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.597837 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.597866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.597890 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.699397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.699442 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.699454 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.699470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.699482 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.801987 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.802017 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.802027 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.802042 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.802053 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.901517 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.901605 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:27 crc kubenswrapper[4909]: E1128 16:11:27.902307 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:27 crc kubenswrapper[4909]: E1128 16:11:27.902339 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.902441 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:27 crc kubenswrapper[4909]: E1128 16:11:27.903026 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.904033 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.904258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.904436 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.904611 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.904823 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.915890 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.929199 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.947875 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/
kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.961799 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.976782 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4909]: I1128 16:11:27.987201 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.006386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.006415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.006425 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.006437 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.006446 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.012156 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f
8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/e
tc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitC
ode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.024026 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.036696 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.050791 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.061869 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.073642 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:
50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.086297 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.099880 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.109026 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.109072 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.109088 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.109109 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.109147 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.124281 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-
28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.145224 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.160583 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.211490 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.211535 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.211545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.211560 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.211569 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.313170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.313230 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.313247 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.313270 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.313288 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.416115 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.416175 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.416193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.416216 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.416234 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.518526 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.518564 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.518576 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.518590 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.518602 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.620367 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.620411 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.620424 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.620439 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.620448 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.723347 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.723397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.723410 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.723428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.723440 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.825808 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.825849 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.825865 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.825887 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.825907 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.900915 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:28 crc kubenswrapper[4909]: E1128 16:11:28.901107 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.928820 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.928866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.928882 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.928905 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4909]: I1128 16:11:28.928921 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.031627 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.031705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.031725 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.031748 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.031764 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.134695 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.134917 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.134948 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.134977 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.135001 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.237450 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.237514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.237533 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.237561 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.237579 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.340216 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.340286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.340304 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.340328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.340346 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.443147 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.443211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.443227 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.443254 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.443270 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.545928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.546007 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.546043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.546075 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.546097 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.649149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.649213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.649231 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.649256 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.649275 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.753020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.753067 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.753104 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.753126 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.753143 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.856185 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.856230 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.856243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.856258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.856270 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.901231 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.901336 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.901413 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:29 crc kubenswrapper[4909]: E1128 16:11:29.901429 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:29 crc kubenswrapper[4909]: E1128 16:11:29.901526 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:29 crc kubenswrapper[4909]: E1128 16:11:29.901590 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.902291 4909 scope.go:117] "RemoveContainer" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.958562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.958874 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.958885 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.958905 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4909]: I1128 16:11:29.958918 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.062463 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.062523 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.062539 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.062560 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.062575 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.165211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.165245 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.165255 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.165269 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.165278 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.267792 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.267842 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.267855 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.267876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.267890 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.370036 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.370068 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.370079 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.370094 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.370103 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.471862 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.471931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.471952 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.471981 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.472002 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.574634 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.574736 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.574750 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.574768 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.574778 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.677367 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.677432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.677450 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.677477 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.677496 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.697999 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/2.log" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.700929 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.701528 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.714470 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.733919 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.746588 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.770068 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.780311 4909 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.780355 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.780368 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.780388 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.780400 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.782020 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.797285 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.814125 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.826943 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.837540 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:
50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.852301 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.869477 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.883092 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.883168 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.883187 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.883213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.883231 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.887981 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-
28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.901493 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:30 crc kubenswrapper[4909]: E1128 16:11:30.901717 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.903462 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"qua
y.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.916703 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.934006 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.947506 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.974971 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.985487 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.985577 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.985597 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.985620 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4909]: I1128 16:11:30.985636 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.088328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.088372 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.088384 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.088398 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.088407 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.190487 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.190551 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.190569 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.190594 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.190612 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.293005 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.293051 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.293062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.293078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.293091 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.395083 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.395153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.395165 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.395181 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.395192 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.511122 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.511173 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.511190 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.511211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.511227 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.613847 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.613872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.613880 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.613892 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.613900 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.705806 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/3.log" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.706522 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/2.log" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.709959 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" exitCode=1 Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.710017 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.710064 4909 scope.go:117] "RemoveContainer" containerID="32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.711186 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:11:31 crc kubenswrapper[4909]: E1128 16:11:31.711429 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.718161 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.718221 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.718244 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.718274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.718300 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.734636 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.750746 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.764043 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.780435 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.799290 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.817540 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.821185 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.821253 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.821277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.821308 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.821332 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.835478 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.856434 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.877185 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.894367 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.900533 4909 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:31 crc kubenswrapper[4909]: E1128 16:11:31.900732 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.901112 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:31 crc kubenswrapper[4909]: E1128 16:11:31.901230 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.901467 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:31 crc kubenswrapper[4909]: E1128 16:11:31.901566 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.926437 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.926484 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.926503 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.926527 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.926545 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.927695 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32babd1d5575ade6650fc51e99fbd5a062f2f8c3e97ffa12588d753f4f133902\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"message\\\":\\\"04 6563 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748135 6563 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.748850 6563 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.749020 6563 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:06.755731 6563 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 16:11:06.755787 6563 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 16:11:06.755852 6563 factory.go:656] Stopping watch factory\\\\nI1128 16:11:06.755873 6563 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:06.755890 6563 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 16:11:06.764172 6563 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1128 16:11:06.764189 6563 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1128 16:11:06.764255 6563 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:06.764281 6563 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 16:11:06.764366 6563 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/package-server-manager-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:11:30.757598 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:30.757607 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-bwbm6\\\\nF1128 16:11:30.757612 6903 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal 
er\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4909]: I1128 16:11:31.960586 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containe
rID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.010754 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.023445 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.028828 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.028863 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.028874 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.028889 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.028929 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.038265 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.053383 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.063929 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.130967 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.131014 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.131022 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.131039 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.131049 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.234748 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.234830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.234853 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.234881 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.234905 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.337830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.337862 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.337870 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.337897 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.337908 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.440566 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.440639 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.440699 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.440730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.440756 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.543971 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.544010 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.544021 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.544035 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.544046 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.646758 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.646833 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.646852 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.646873 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.646890 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.687923 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.687992 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.688006 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.688025 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.688061 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.714442 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.717747 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/3.log" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.719419 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.719463 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.719472 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.719503 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.719515 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.721705 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.721852 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.736142 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.738286 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.743050 4909 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.743152 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.743172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.743199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.743226 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.764553 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770380 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770463 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770488 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770524 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770549 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.770880 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7f
e8b49ca5d94af1bf01cd1162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/package-server-manager-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:11:30.757598 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:30.757607 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-bwbm6\\\\nF1128 16:11:30.757612 6903 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal er\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.789212 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.792010 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.792949 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.793086 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.793101 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.793117 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.793141 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.808949 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.812532 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.812798 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.814883 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.814951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.814975 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.815003 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.815025 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.825529 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.840746 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.858721 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.883721 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a8528829
79b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.897341 4909 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.900573 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:32 crc kubenswrapper[4909]: E1128 16:11:32.900827 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.915583 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.916970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.917027 4909 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.917044 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.917083 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.917101 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.933904 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.953949 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.971848 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"
name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4909]: I1128 16:11:32.988836 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.008706 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.020782 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.020854 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.020878 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.020908 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.020931 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.024185 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.044628 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.124167 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.124234 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.124251 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.124276 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 
16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.124293 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
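The status patches above are all rejected for the same reason: the network-node-identity webhook at 127.0.0.1:9743 serves a certificate whose notAfter (2025-08-24T17:21:41Z) is long past the node's clock. A minimal sketch of how one could confirm that from the node, assuming Python plus the third-party cryptography package are available; the host and port come from the log, the helper name is ours:

```python
# Hypothetical check (names ours): fetch the certificate served by the
# network-node-identity webhook at 127.0.0.1:9743 and print its validity
# window. The log suggests notAfter should read 2025-08-24T17:21:41Z.
import socket
import ssl

from cryptography import x509  # third-party; assumed available

def fetch_cert_window(host: str = "127.0.0.1", port: int = 9743) -> None:
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE      # accept the expired cert so we can inspect it
    with socket.create_connection((host, port), timeout=5) as raw:
        with ctx.wrap_socket(raw, server_hostname=host) as tls:
            der = tls.getpeercert(binary_form=True)
    cert = x509.load_der_x509_certificate(der)
    print("notBefore:", cert.not_valid_before)
    print("notAfter: ", cert.not_valid_after)

if __name__ == "__main__":
    fetch_cert_window()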
Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.900837 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.900934 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:33 crc kubenswrapper[4909]: E1128 16:11:33.901045 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:33 crc kubenswrapper[4909]: E1128 16:11:33.901235 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:33 crc kubenswrapper[4909]: I1128 16:11:33.900866 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:33 crc kubenswrapper[4909]: E1128 16:11:33.901746 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:34 crc kubenswrapper[4909]: I1128 16:11:34.900702 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:34 crc kubenswrapper[4909]: E1128 16:11:34.900939 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
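The earlier multus termination message ("still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf ... pollimmediate error: timed out waiting for the condition") describes a poll loop over that readiness-indicator file. A rough Python analogue of the wait; the 1 s interval and 45 s timeout are illustrative, not the daemon's real settings:

```python
# Rough analogue of the PollImmediate wait described in the multus
# termination message above; interval/timeout values are assumptions.
import os
import time

INDICATOR = "/host/run/multus/cni/net.d/10-ovn-kubernetes.conf"

def wait_for_default_network(interval: float = 1.0, timeout: float = 45.0) -> bool:
    deadline = time.monotonic() + timeout
    while True:
        if os.path.exists(INDICATOR):   # "immediate": checked before the first sleep
            return True
        if time.monotonic() >= deadline:
            return False                # -> "timed out waiting for the condition"
        time.sleep(interval)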
Nov 28 16:11:35 crc kubenswrapper[4909]: I1128 16:11:35.901430 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:35 crc kubenswrapper[4909]: I1128 16:11:35.901501 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:35 crc kubenswrapper[4909]: I1128 16:11:35.901461 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:35 crc kubenswrapper[4909]: E1128 16:11:35.901619 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:35 crc kubenswrapper[4909]: E1128 16:11:35.901730 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:35 crc kubenswrapper[4909]: E1128 16:11:35.901830 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:36 crc kubenswrapper[4909]: I1128 16:11:36.901199 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:36 crc kubenswrapper[4909]: E1128 16:11:36.901684 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.160157 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.160234 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.160245 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.160263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.160274 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.262509 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.262814 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.262940 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.263081 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.263209 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.365407 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.365454 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.365466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.365483 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.365495 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.469026 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.469089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.469110 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.469136 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.469154 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.572763 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.572835 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.572861 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.572891 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.572914 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.675830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.675928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.675953 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.675981 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.676002 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.778245 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.778324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.778344 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.778370 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.778387 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.880991 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.881043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.881055 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.881073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.881085 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.900906 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.900969 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:37 crc kubenswrapper[4909]: E1128 16:11:37.901112 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.901192 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:37 crc kubenswrapper[4909]: E1128 16:11:37.901278 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:37 crc kubenswrapper[4909]: E1128 16:11:37.901783 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.919938 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restart
Count\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"aud
it-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.937746 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\
\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.952708 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.966927 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.981214 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.984584 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.984639 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.984694 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.984727 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.984747 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4909]: I1128 16:11:37.999521 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.018711 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.041255 4909 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/package-server-manager-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:11:30.757598 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:30.757607 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-bwbm6\\\\nF1128 16:11:30.757612 6903 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal er\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.059534 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.079498 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.087141 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.087193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.087210 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.087234 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.087254 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.097009 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.110566 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.125145 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.137231 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.150598 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.161133 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.175248 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.190432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.190688 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.190802 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.190872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.190943 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.293538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.293844 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.293923 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.293982 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.294037 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.397613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.397734 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.397752 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.397775 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.397792 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.500578 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.500872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.500948 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.501019 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.501090 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.603985 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.604063 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.604088 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.604118 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.604140 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.707834 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.708086 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.708176 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.708277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.708370 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.811410 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.811472 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.811491 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.811516 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.811535 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.900960 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:38 crc kubenswrapper[4909]: E1128 16:11:38.901150 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.914155 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.914330 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.914449 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.914605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4909]: I1128 16:11:38.914761 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.017841 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.018186 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.018208 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.018236 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.018257 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.121211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.121630 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.122270 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.122907 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.123276 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.227017 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.227484 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.227629 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.227907 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.228061 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.330468 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.330539 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.330562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.330592 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.330615 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.433323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.433391 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.433415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.433444 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.433467 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.536379 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.536422 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.536433 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.536449 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.536463 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.638754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.638825 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.638854 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.638884 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.638906 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.741563 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.741618 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.741637 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.741686 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.741704 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.845107 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.845170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.845189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.845217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.845247 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.901344 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.901344 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:39 crc kubenswrapper[4909]: E1128 16:11:39.901566 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.901597 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:39 crc kubenswrapper[4909]: E1128 16:11:39.902053 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:39 crc kubenswrapper[4909]: E1128 16:11:39.902186 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.948223 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.948288 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.948306 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.948331 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4909]: I1128 16:11:39.948350 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.050823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.050868 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.050880 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.050897 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.050909 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.153264 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.153302 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.153313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.153328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.153340 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.256222 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.256304 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.256323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.256346 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.256364 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.361266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.361533 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.361643 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.361764 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.361889 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.420965 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.421134 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.421102779 +0000 UTC m=+146.817787343 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.464686 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.464738 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.464761 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.464793 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.464816 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.522438 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.522519 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.522595 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.522630 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.522794 4909 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
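The Unmounter.TearDownAt failure above is a registry-lookup problem rather than a mount problem: CSI volume operations resolve the driver name against the set of plugins currently registered with the kubelet, and right after boot kubevirt.io.hostpath-provisioner is absent from that set until its driver pod comes back up. A hedged sketch of that lookup pattern (the types and method names are illustrative, not the in-tree implementation):

```go
// Hypothetical sketch of a CSI driver registry lookup; real kubelets
// populate this via the plugin-registration socket under
// /var/lib/kubelet/device-plugins (see the relabel records in this log).
package main

import (
	"fmt"
	"sync"
)

// csiRegistry tracks drivers that have completed plugin registration.
type csiRegistry struct {
	mu      sync.RWMutex
	drivers map[string]struct{} // driver name -> registered
}

func (r *csiRegistry) register(name string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = struct{}{}
}

// client fails with an error shaped like the log line while the driver
// has not (re)registered, so unmounts are retried later instead.
func (r *csiRegistry) client(name string) error {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if _, ok := r.drivers[name]; !ok {
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return nil
}

func main() {
	reg := &csiRegistry{drivers: map[string]struct{}{}}
	if err := reg.client("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("TearDownAt:", err) // matches the failure mode above
	}
	reg.register("kubevirt.io.hostpath-provisioner")
	fmt.Println("after registration:", reg.client("kubevirt.io.hostpath-provisioner"))
}
```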
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.522861 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.522838739 +0000 UTC m=+146.919523293 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523078 4909 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523123 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.523109327 +0000 UTC m=+146.919793891 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523338 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523382 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523405 4909 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523467 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.523446077 +0000 UTC m=+146.920130641 (durationBeforeRetry 1m4s). 
Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.523991 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.524148 4909 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.524267 4909 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.524490 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.524458016 +0000 UTC m=+146.921142570 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.568248 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.568545 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.568757 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.568943 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.569281 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
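The projected.go errors above aggregate: a kube-api-access-* volume is assembled from several sources (the service account token plus the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps), and setup fails with the full list of unresolved objects if any source is missing from the kubelet's registered-object view of the namespace. A minimal sketch of that all-or-nothing assembly, with illustrative names and a plain map standing in for the kubelet's object cache:

```go
// Minimal sketch, assuming nothing beyond the log: every source of a
// projected volume must resolve, and all failures are reported together
// in the [object ... not registered, object ... not registered] shape.
package main

import (
	"errors"
	"fmt"
)

type objectCache map[string]bool // "namespace/name" -> registered

func (c objectCache) get(ref string) error {
	if !c[ref] {
		return fmt.Errorf("object %q not registered", ref)
	}
	return nil
}

// prepareProjected mimics preparing data for a projected volume: collect
// every source failure instead of stopping at the first one.
func prepareProjected(c objectCache, sources []string) error {
	var errs []error
	for _, ref := range sources {
		if err := c.get(ref); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func main() {
	cache := objectCache{} // nothing synced yet for the namespace
	err := prepareProjected(cache, []string{
		"openshift-network-diagnostics/kube-root-ca.crt",
		"openshift-network-diagnostics/openshift-service-ca.crt",
	})
	fmt.Println("MountVolume.SetUp:", err)
}
```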
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.672998 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.673050 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.673061 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.673077 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.673089 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.776185 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.776274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.776303 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.776339 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.776363 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.879505 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.880119 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.880187 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.880260 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.880326 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.900991 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:40 crc kubenswrapper[4909]: E1128 16:11:40.901137 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.983443 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.983932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.984023 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.984145 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4909]: I1128 16:11:40.984236 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.087951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.088322 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.088461 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.088617 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.089076 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.192048 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.192084 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.192092 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.192108 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.192116 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.296158 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.296220 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.296236 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.296258 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.296277 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.398032 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.398069 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.398080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.398095 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.398105 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.501098 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.501145 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.501155 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.501172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.501184 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.604511 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.604552 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.604565 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.604580 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.604590 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.708317 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.708401 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.708422 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.708453 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.708475 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.811951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.812009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.812025 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.812049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.812066 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.900851 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.900951 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:41 crc kubenswrapper[4909]: E1128 16:11:41.901040 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.901323 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:41 crc kubenswrapper[4909]: E1128 16:11:41.901428 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:41 crc kubenswrapper[4909]: E1128 16:11:41.901497 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.914864 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.914912 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.914925 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.914942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4909]: I1128 16:11:41.914959 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.017603 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.017707 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.017726 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.017754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.017780 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.121100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.121161 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.121184 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.121211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.121228 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.232625 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.232732 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.233142 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.233207 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.233228 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.337889 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.337976 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.338003 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.338037 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.338070 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.442179 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.442251 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.442272 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.442300 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.442320 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.546054 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.546142 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.546162 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.546193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.546218 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.650381 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.650468 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.650493 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.650524 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.650550 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.754715 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.754779 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.754798 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.754823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.754843 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.859364 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.859412 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.859430 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.859456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.859473 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.901812 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:42 crc kubenswrapper[4909]: E1128 16:11:42.902020 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.962645 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.962739 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.962764 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.962789 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.962806 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.964381 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.964425 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.964437 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.964466 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.964488 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4909]: E1128 16:11:42.983437 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.988170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.988407 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.988559 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.988743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4909]: I1128 16:11:42.988909 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.009166 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.014324 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.014389 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.014406 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.014431 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.014449 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.033900 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.039034 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.039255 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.039395 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.039751 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.039908 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.059779 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.064624 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.064729 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.064749 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.064802 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.064817 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.080809 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:43Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.081035 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.082845 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.082878 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.082890 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.082929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.082945 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.185610 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.185718 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.185739 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.185763 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.185781 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.289074 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.289177 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.289196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.289283 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.289360 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.392465 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.392538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.392562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.392596 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.392620 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.496266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.496397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.496420 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.496443 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.496462 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.599240 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.599560 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.599913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.600183 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.600400 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.703344 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.703408 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.703426 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.703451 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.703468 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.806588 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.807213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.807408 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.807586 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.807834 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.901538 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.901740 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.902068 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.902279 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.902592 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:43 crc kubenswrapper[4909]: E1128 16:11:43.903099 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.911897 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.911963 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.911989 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.912020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.912042 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4909]: I1128 16:11:43.918826 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.015456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.015868 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.016049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.016216 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.016364 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.119605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.119688 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.119705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.119728 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.119744 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.223274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.223323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.223339 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.223362 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.223379 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.327145 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.327191 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.327207 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.327229 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.327248 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.429414 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.429470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.429493 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.429521 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.429542 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.532200 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.532266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.532288 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.532335 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.532356 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.635520 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.635571 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.635588 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.635618 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.635635 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.738752 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.738806 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.738828 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.738859 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.738880 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.842420 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.842486 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.842506 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.842532 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.842557 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.900522 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:44 crc kubenswrapper[4909]: E1128 16:11:44.900799 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.946144 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.946214 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.946239 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.946271 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4909]: I1128 16:11:44.946297 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.049041 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.049116 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.049142 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.049171 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.049192 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.152680 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.152740 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.152757 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.152780 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.152797 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.255180 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.255244 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.255262 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.255286 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.255309 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.358500 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.358556 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.358574 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.358596 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.358613 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.461563 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.461628 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.461686 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.461720 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.461743 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.565459 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.565518 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.565541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.565570 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.565594 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.668595 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.668692 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.668717 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.668745 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.668766 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.770788 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.770832 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.770843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.770860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.770870 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.874062 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.874290 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.874307 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.874331 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.874348 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.901626 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.901690 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:45 crc kubenswrapper[4909]: E1128 16:11:45.901844 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.901900 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:45 crc kubenswrapper[4909]: E1128 16:11:45.902527 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:45 crc kubenswrapper[4909]: E1128 16:11:45.902604 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.903205 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:11:45 crc kubenswrapper[4909]: E1128 16:11:45.903504 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.977355 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.977433 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.977458 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.977492 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4909]: I1128 16:11:45.977505 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.079894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.079964 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.079976 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.080020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.080036 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.183695 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.183762 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.183785 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.183817 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.183839 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.287068 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.287112 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.287124 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.287142 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.287154 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.389276 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.389304 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.389313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.389328 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.389339 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.491765 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.491803 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.491829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.491847 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.491858 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.594476 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.594538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.594556 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.594581 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.594600 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.697596 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.697647 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.697678 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.697701 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.697717 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.800733 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.800796 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.800816 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.800840 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.800858 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.901529 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:46 crc kubenswrapper[4909]: E1128 16:11:46.902163 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.903392 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.903440 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.903456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.903479 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4909]: I1128 16:11:46.903497 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.007289 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.007382 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.007409 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.007440 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.007462 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.110893 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.110950 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.110970 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.110992 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.111009 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.213785 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.213843 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.213857 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.213876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.213888 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.317126 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.317613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.317862 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.318090 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.318306 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.420726 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.420974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.421092 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.421239 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.421360 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.524175 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.524406 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.524521 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.524614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.524735 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.627432 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.627510 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.627528 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.627552 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.627569 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.730480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.730541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.730558 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.730582 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.730599 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.833024 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.833075 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.833091 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.833114 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.833134 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.901088 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.901292 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.901339 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:47 crc kubenswrapper[4909]: E1128 16:11:47.901499 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:47 crc kubenswrapper[4909]: E1128 16:11:47.901644 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:47 crc kubenswrapper[4909]: E1128 16:11:47.901867 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.921862 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5a7347b-5536-45b5-be75-4bf0ed1b922b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75a9b32698a7aaaa2b6f88541c9069902a98ad0146bf478ec58ce3a97fb410ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://309f7a04e1c92fde11e7fedfe089ff043e14ea788f60339fb7acf8c6df0c8c8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mglcp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:50Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8s8f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.936465 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.936578 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.936605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.936711 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.936743 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.944267 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://475d2c473e130345e0d544c2b7990c476e83c479c644db89e1ceda2f4278d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18059a14081d677863133d95ec3cecf9359d6464af62be1e53bcd9514311ff59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.958264 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-q8nfv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b77cc4b-dc69-4ece-8e10-64eebc98a578\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://71f6384b2d5c86c668a4d7dfc23b14a893f93b9ec587bec43f74eb0926cc2c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prc9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-q8nfv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.972736 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f84564a3775e0ece3a5c0f176e8d9607466d4a7a505173e9668c51fa2229e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4909]: I1128 16:11:47.988756 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.009262 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wx2jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6e3805b2-8ad3-4fa6-b88f-e0ae42294202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"message\\\":\\\"2025-11-28T16:10:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50\\\\n2025-11-28T16:10:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76b37028-5be7-441a-8772-e096e031ea50 to /host/opt/cni/bin/\\\\n2025-11-28T16:10:39Z [verbose] multus-daemon started\\\\n2025-11-28T16:10:39Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:11:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6lxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wx2jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.026958 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dda7db1a-d603-4121-8cd6-e72c9ae02961\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.039727 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.039800 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.039819 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.039846 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 
16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.039864 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.042377 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5b5c4c2-af06-4771-b6eb-d13a2819665c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea29d0be350e786147f355c5bb4902924aa0f921413b432ad093a796d21b9d05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94501e940379fe9b429e532a91799701e733bb5c3be1c5f32da07a9957f955b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-c
ontroller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37dc41c4e260f683c1ae04ae87a883fe78e4f1f620a946ccb6a87191a5eae0ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.063215 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c17e2fff-c7ee-475c-8c17-58a394744b91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7f
e8b49ca5d94af1bf01cd1162\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/package-server-manager-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 16:11:30.757598 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:30.757607 6903 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-bwbm6\\\\nF1128 16:11:30.757612 6903 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal er\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zbrc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qxw94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.082029 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7907b546dbb614e80485187b026c4c5ca17f52d88d5c28ce26a7bf5e3c09e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.103053 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0ac931-d37b-4342-8c12-c2779b455cc5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae003675d8b34489b946224bcd380cc68ff49acec6769edfe74a8345018e7909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5xrns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-d5nd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.119787 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"443537be-83fe-4770-9aff-5fb3d2bef9a6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d62be23143f1f28ff523369b4a6b5cb91146ac54236b31cc8d91d200bd8598e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f88b0080f0e1e8677f2525f47faf3bcd7fa2f54bc91057b318d4c1f86a16f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34312055c6f3a6714d732ad11b27c20139c8a9be7636a9dd215a6e680803afd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-control
ler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a891f34668e5de053a5fef2b954e97fb437e1a9e3bc2ed26b9bb767a3dda592d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.137443 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.142502 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.142702 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.142830 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.142966 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.143073 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.157433 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:35Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.170801 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bwbm6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a458a03-0fea-47c0-9748-510145f40b30\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd8c3d64058ca1f2862ce478295e1a694117d00f9172f78c7c5e2945d7357aad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmxp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bwbm6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.188733 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9d93f2d-2a90-4d2d-b8e6-e48973be876f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e765772d8b12200fadfb28064b55c1abb9a8a6654602159c4910d2ea5b2d307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b45f41a92db18043fc03c9cd7d9bbeef0020790595045abd17a9105d0f01367b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7a3a2a5eea9704ba4cd09ac6fa00e33c97d163973a852882979b1c8f2064979\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0348e1d7066423ab9a595f60893021830d02de3c18fca2fdb0082efa41644124\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb9ad3dff8afd5ac9ec18bd48d273f46b76a21b791dc6a17aea1d7559750c93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5898b3475414a39ef005c45982f12791683eb027e734ec8d25dc3489c93026a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2056e26b7e46cd32f8a12d6d054e249ed43f9854c0f7720791e98f9e6c5b5346\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wwjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gdz9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.207573 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffceca0e-d9b5-484f-8753-5e0269eec811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgql6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:51Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8rjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.220478 4909 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed2ef34c-f14b-4479-9110-d1d466302d50\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56b9768e934f5eba0145cbfba9668e6215d26784cf74c2c687147f910dc5f398\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21c2d58c3f013ade0f092f36eefd505c6e49d502372094b569982b9f1b273887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21c2d58c3f013ade0f092f36eefd505c6e49d502372094b569982b9f1b273887\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.248732 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.248821 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.248842 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.248872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.248900 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.352078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.352548 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.352779 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.352953 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.353262 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.455583 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.455614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.455623 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.455637 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.455648 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.559125 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.559172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.559189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.559211 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.559228 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.662001 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.662064 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.662082 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.662105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.662121 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.764953 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.765011 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.765024 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.765042 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.765055 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.867999 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.868059 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.868076 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.868100 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.868117 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.901439 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:48 crc kubenswrapper[4909]: E1128 16:11:48.901626 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.971224 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.971767 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.971935 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.972078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4909]: I1128 16:11:48.972209 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.075758 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.076741 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.076966 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.077162 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.077333 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.180298 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.180364 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.180387 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.180419 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.180442 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.283547 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.283612 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.283629 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.283685 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.283704 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.385860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.385919 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.385937 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.385961 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.385981 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.489806 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.490202 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.490221 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.490242 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.490259 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.593149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.593190 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.593201 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.593217 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.593228 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.696863 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.696933 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.696950 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.696973 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.696993 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.799716 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.799786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.799804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.799832 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.799849 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.901133 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.901170 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:49 crc kubenswrapper[4909]: E1128 16:11:49.901312 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:49 crc kubenswrapper[4909]: E1128 16:11:49.901911 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.901993 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:49 crc kubenswrapper[4909]: E1128 16:11:49.902098 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.903517 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.903565 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.903582 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.903605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4909]: I1128 16:11:49.903623 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.005907 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.005943 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.005954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.005971 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.005986 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.108862 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.108934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.108954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.108979 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.108996 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.211924 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.211979 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.211990 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.212011 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.212031 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.317034 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.317110 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.317133 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.317165 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.317184 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.427533 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.427697 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.427726 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.428169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.428722 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.532097 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.532151 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.532169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.532194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.532215 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.635246 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.635312 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.635330 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.635356 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.635377 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.737717 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.738095 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.738280 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.738415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.738539 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.841632 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.841734 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.841745 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.841763 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.841774 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.900596 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:50 crc kubenswrapper[4909]: E1128 16:11:50.900851 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.944153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.944224 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.944243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.944270 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4909]: I1128 16:11:50.944289 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.047787 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.048029 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.048132 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.048299 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.048398 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.151883 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.151938 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.151955 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.152003 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.152025 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.255013 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.255089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.255112 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.255139 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.255161 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.357989 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.358040 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.358056 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.358077 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.358101 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.461470 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.461533 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.461550 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.461577 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.461596 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.565252 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.565370 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.565400 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.565427 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.565446 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.669084 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.669146 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.669163 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.669189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.669210 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.771624 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.771711 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.771730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.771755 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.771772 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.874929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.875277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.875443 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.875653 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.875986 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.900536 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.900568 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:51 crc kubenswrapper[4909]: E1128 16:11:51.900994 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:51 crc kubenswrapper[4909]: E1128 16:11:51.901117 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.900644 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:51 crc kubenswrapper[4909]: E1128 16:11:51.901373 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.979334 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.979409 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.979448 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.979479 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4909]: I1128 16:11:51.979502 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.082199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.082271 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.082287 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.082313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.082333 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.184790 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.184829 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.184840 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.184856 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.184867 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.288218 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.288278 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.288295 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.288323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.288340 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.390819 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.390878 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.390895 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.390918 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.390935 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.494048 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.494115 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.494132 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.494156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.494173 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.596995 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.597078 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.597091 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.597144 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.597162 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.700409 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.700516 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.700538 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.700561 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.700578 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.802710 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.802752 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.802760 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.802776 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.802785 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.901232 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:11:52 crc kubenswrapper[4909]: E1128 16:11:52.901418 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.906194 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.906255 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.906272 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.906295 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4909]: I1128 16:11:52.906311 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.014223 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.014309 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.014335 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.014362 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.014444 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.117800 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.117876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.117899 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.117932 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.117953 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.221155 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.221205 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.221218 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.221236 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.221251 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.324199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.324263 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.324281 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.324305 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.324325 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.428097 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.428159 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.428176 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.428203 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.428222 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.462802 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.462866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.462883 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.462907 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.462925 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
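[The sync errors above all trace back to one condition: the kubelet reports NetworkReady=false until a CNI configuration file exists in /etc/kubernetes/cni/net.d/. Below is a minimal Go sketch of that existence check for troubleshooting on the node; it is illustrative only, not the kubelet's own implementation, and the glob patterns are the conventional CNI file extensions, assumed here.]

    // cnicheck.go: a minimal diagnostic sketch (not kubelet code). The
    // "no CNI configuration file" condition in the records above clears
    // once a *.conf, *.conflist, or *.json file appears in the conf dir.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
        var found []string
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
            m, err := filepath.Glob(filepath.Join(confDir, pat))
            if err != nil {
                fmt.Fprintln(os.Stderr, "bad pattern:", err)
                os.Exit(2)
            }
            found = append(found, m...)
        }
        if len(found) == 0 {
            fmt.Println("no CNI configuration file found; node will stay NotReady")
            os.Exit(1)
        }
        fmt.Println("CNI configuration present:", found)
    }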
Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.478451 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z"
[... the five-record not-ready cycle repeats at 16:11:53.483; identical entries omitted ...]
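[The status patch above is rejected before it reaches storage: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, months before the node's clock time. The following Go sketch is one way to confirm that from the node; it is a diagnostic assumption, not part of any shipped tooling, and InsecureSkipVerify is deliberate so the expired certificate can still be fetched and inspected.]

    // certcheck.go: fetch the webhook's serving certificate and print its
    // validity window, to confirm the "certificate has expired" error above.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
    )

    func main() {
        // Skip verification on purpose: an expired cert would otherwise
        // abort the handshake before we can read its dates.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Println("subject:  ", cert.Subject)
        fmt.Println("notBefore:", cert.NotBefore)
        fmt.Println("notAfter: ", cert.NotAfter) // the log reports 2025-08-24T17:21:41Z
    }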
event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.483866 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.483889 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.483909 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.505534 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.510863 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.510914 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
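[For readers unfamiliar with the payload format: the kubelet submits its status as a strategic merge patch, and the $setElementOrder/conditions directive pins the order of the conditions list, whose elements are matched by their "type" merge key. The Go sketch below reconstructs that shape with only the standard library; it is illustrative, and the field values are trimmed from the records above.]

    // patchshape.go: a stdlib-only sketch (not kubelet code) of the
    // strategic-merge-patch structure visible in the failed requests.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        patch := map[string]any{
            "status": map[string]any{
                // Directive fixing the merge order of the conditions list.
                "$setElementOrder/conditions": []map[string]string{
                    {"type": "MemoryPressure"}, {"type": "DiskPressure"},
                    {"type": "PIDPressure"}, {"type": "Ready"},
                },
                // Each condition entry is merged by its "type" key.
                "conditions": []map[string]string{{
                    "type":   "Ready",
                    "status": "False",
                    "reason": "KubeletNotReady", // as in the log records above
                }},
            },
        }
        out, err := json.MarshalIndent(patch, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }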
event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.510931 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.510951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.510967 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.530780 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.534168 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.534230 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.534249 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.534274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.534293 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.554116 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.558732 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.558997 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.559170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.559363 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.559521 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.575734 4909 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b44f6a6c-5ae2-4ed6-9fc9-6c0acf034e9d\\\",\\\"systemUUID\\\":\\\"1e8d38e9-395c-4d37-b567-3bfe4869e3f7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.575851 4909 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.578242 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.578271 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.578280 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.578296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.578307 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.681075 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.681139 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.681156 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.681181 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.681198 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.785182 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.785234 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.785250 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.785300 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.785317 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.888413 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.888780 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.888937 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.889076 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.889254 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.901047 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.901177 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.901384 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.901461 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.901584 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:53 crc kubenswrapper[4909]: E1128 16:11:53.901785 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.991080 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.991149 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.991170 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.991199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4909]: I1128 16:11:53.991223 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.093848 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.093934 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.093957 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.093988 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.094015 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.196994 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.197049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.197067 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.197088 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.197106 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.300337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.300406 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.300423 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.300449 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.300469 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.403243 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.403277 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.403285 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.403300 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.403310 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.506438 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.506514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.506532 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.506557 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.506577 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.609778 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.609872 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.609891 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.609920 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.609940 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.712578 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.712631 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.712686 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.712710 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.712728 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.815337 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.815404 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.815422 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.815446 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.815466 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.901147 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:54 crc kubenswrapper[4909]: E1128 16:11:54.901361 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.918335 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.918413 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.918438 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.918472 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:54 crc kubenswrapper[4909]: I1128 16:11:54.918495 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.021928 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.021983 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.022008 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.022038 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.022062 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.125908 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.125973 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.125990 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.126015 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.126033 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.229719 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.229786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.229804 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.229828 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.229847 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.333814 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.333869 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.333888 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.333916 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.333937 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.437082 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.437172 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.437193 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.437226 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.437247 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.485968 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:55 crc kubenswrapper[4909]: E1128 16:11:55.486137 4909 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 16:11:55 crc kubenswrapper[4909]: E1128 16:11:55.486238 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs podName:ffceca0e-d9b5-484f-8753-5e0269eec811 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:59.486215627 +0000 UTC m=+161.882900161 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs") pod "network-metrics-daemon-8rjn2" (UID: "ffceca0e-d9b5-484f-8753-5e0269eec811") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.539877 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.539950 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.540153 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.540209 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.540237 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.642552 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.642607 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.642624 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.642646 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.642687 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.746343 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.746428 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.746450 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.746476 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.746496 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.849579 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.849651 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.849727 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.849757 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.849778 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.901302 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.901347 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.901323 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:55 crc kubenswrapper[4909]: E1128 16:11:55.901528 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:55 crc kubenswrapper[4909]: E1128 16:11:55.901634 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:55 crc kubenswrapper[4909]: E1128 16:11:55.901846 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.952282 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.952341 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.952363 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.952394 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4909]: I1128 16:11:55.952423 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.055893 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.056020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.056052 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.056075 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.056092 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.158496 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.158560 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.158584 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.158614 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.158632 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.262237 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.262317 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.262340 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.262366 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.262384 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.365397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.365478 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.365496 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.365520 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.365536 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.468562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.468621 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.468638 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.468700 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.468723 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.571936 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.572019 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.572042 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.572071 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.572089 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.675389 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.675459 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.675475 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.675501 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.675518 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.778828 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.778911 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.778929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.778951 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.778968 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.881922 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.881996 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.882020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.882049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.882072 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.900506 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:56 crc kubenswrapper[4909]: E1128 16:11:56.900735 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.985999 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.986065 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.986082 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.986106 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:56 crc kubenswrapper[4909]: I1128 16:11:56.986123 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.089602 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.089706 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.089725 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.089751 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.089770 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.192544 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.192631 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.192652 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.192705 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.192726 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.295530 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.295609 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.295634 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.295703 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.295731 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.398196 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.398273 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.398296 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.398322 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.398342 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.501579 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.501640 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.501681 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.501706 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.501724 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.606255 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.606313 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.606333 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.606371 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.606397 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.709539 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.709620 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.709644 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.709718 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.709736 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.812573 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.812638 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.812704 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.812743 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.812762 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.901429 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.901571 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:57 crc kubenswrapper[4909]: E1128 16:11:57.901630 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.902219 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:57 crc kubenswrapper[4909]: E1128 16:11:57.902308 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.902721 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"
Nov 28 16:11:57 crc kubenswrapper[4909]: E1128 16:11:57.902528 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:57 crc kubenswrapper[4909]: E1128 16:11:57.902965 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qxw94_openshift-ovn-kubernetes(c17e2fff-c7ee-475c-8c17-58a394744b91)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.915401 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.915456 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.915472 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.915489 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.915504 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.941766 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=82.9417429 podStartE2EDuration="1m22.9417429s" podCreationTimestamp="2025-11-28 16:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:57.941711049 +0000 UTC m=+100.338395603" watchObservedRunningTime="2025-11-28 16:11:57.9417429 +0000 UTC m=+100.338427434"
Nov 28 16:11:57 crc kubenswrapper[4909]: I1128 16:11:57.987239 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=80.987211159 podStartE2EDuration="1m20.987211159s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:57.96850044 +0000 UTC m=+100.365185004" watchObservedRunningTime="2025-11-28 16:11:57.987211159 +0000 UTC m=+100.383895713"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.017768 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.018497 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.019842 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.019898 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.019917 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.026532 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-wx2jj" podStartSLOduration=81.026495431 podStartE2EDuration="1m21.026495431s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.026487211 +0000 UTC m=+100.423171765" watchObservedRunningTime="2025-11-28 16:11:58.026495431 +0000 UTC m=+100.423179975"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.100899 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podStartSLOduration=81.100875604 podStartE2EDuration="1m21.100875604s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.073488015 +0000 UTC m=+100.470172579" watchObservedRunningTime="2025-11-28 16:11:58.100875604 +0000 UTC m=+100.497560158"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.120381 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=15.120361445 podStartE2EDuration="15.120361445s" podCreationTimestamp="2025-11-28 16:11:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.119967253 +0000 UTC m=+100.516651787" watchObservedRunningTime="2025-11-28 16:11:58.120361445 +0000 UTC m=+100.517045979"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.122896 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.122942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.122954 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.122972 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.122986 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.139090 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=51.139065364 podStartE2EDuration="51.139065364s" podCreationTimestamp="2025-11-28 16:11:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.138168708 +0000 UTC m=+100.534853272" watchObservedRunningTime="2025-11-28 16:11:58.139065364 +0000 UTC m=+100.535749898"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.197268 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-bwbm6" podStartSLOduration=82.197236419 podStartE2EDuration="1m22.197236419s" podCreationTimestamp="2025-11-28 16:10:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.196318363 +0000 UTC m=+100.593002947" watchObservedRunningTime="2025-11-28 16:11:58.197236419 +0000 UTC m=+100.593920983"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.219071 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gdz9b" podStartSLOduration=81.219051798 podStartE2EDuration="1m21.219051798s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.218823341 +0000 UTC m=+100.615507955" watchObservedRunningTime="2025-11-28 16:11:58.219051798 +0000 UTC m=+100.615736332"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.225309 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.225372 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.225389 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.225413 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.225429 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.288683 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-q8nfv" podStartSLOduration=81.288632852 podStartE2EDuration="1m21.288632852s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.273537727 +0000 UTC m=+100.670222281" watchObservedRunningTime="2025-11-28 16:11:58.288632852 +0000 UTC m=+100.685317406"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.328360 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.328412 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.328424 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.328441 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.328452 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.460552 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.460610 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.460629 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.460652 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.460697 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.563280 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.563333 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.563378 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.563397 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.563409 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.666049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.666095 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.666105 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.666120 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.666132 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.768822 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.768892 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.768909 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.768936 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.768955 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.873206 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.873287 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.873307 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.873338 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.873358 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.901124 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:11:58 crc kubenswrapper[4909]: E1128 16:11:58.901405 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.928086 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8s8f9" podStartSLOduration=80.928040751 podStartE2EDuration="1m20.928040751s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:11:58.288398865 +0000 UTC m=+100.685083399" watchObservedRunningTime="2025-11-28 16:11:58.928040751 +0000 UTC m=+101.324725315"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.929280 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.976504 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.976611 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.976688 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.976717 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:58 crc kubenswrapper[4909]: I1128 16:11:58.976735 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.079877 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.079955 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.079974 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.080009 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.080030 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.184141 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.184245 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.184266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.184300 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.184322 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.287972 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.288045 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.288063 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.288089 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.288107 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.391364 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.391438 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.391458 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.391483 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.391502 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.494514 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.494593 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.494617 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.494647 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.494705 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.598102 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.598185 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.598199 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.598215 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.598226 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.701225 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.701300 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.701323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.701352 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.701382 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.804513 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.804581 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.804605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.804635 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.804706 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.901979 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.902111 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.902110 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:59 crc kubenswrapper[4909]: E1128 16:11:59.902342 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:59 crc kubenswrapper[4909]: E1128 16:11:59.902492 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:59 crc kubenswrapper[4909]: E1128 16:11:59.902690 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.908540 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.908613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.908634 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.908683 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4909]: I1128 16:11:59.908714 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.011418 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.011519 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.011537 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.011562 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.011581 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.114754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.114813 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.114823 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.114838 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.114848 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.216969 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.217007 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.217020 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.217039 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.217055 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.320112 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.320169 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.320188 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.320213 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.320232 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.423325 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.423426 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.423445 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.423469 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.423488 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.526765 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.526819 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.526837 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.526860 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.526881 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.630821 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.630946 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.630966 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.630989 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.631008 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.733901 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.733969 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.733989 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.734013 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.734032 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.836287 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.836342 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.836360 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.836381 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.836400 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.901066 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:12:00 crc kubenswrapper[4909]: E1128 16:12:00.901267 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.939942 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.940052 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.940077 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.940107 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4909]: I1128 16:12:00.940128 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.043781 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.043868 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.043894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.043929 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.043951 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.147274 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.147359 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.147384 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.147417 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.147443 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.250805 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.250874 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.250894 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.250919 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.250938 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.353682 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.353762 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.353786 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.353816 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.353834 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.457541 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.457605 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.457627 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.457683 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.457702 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.560511 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.560697 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.560721 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.560744 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.560763 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.664006 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.664065 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.664086 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.664113 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.664425 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.767613 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.767730 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.767754 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.767780 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.767798 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.870803 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.870856 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.870867 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.870884 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.870896 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.900801 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.900921 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.901039 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:01 crc kubenswrapper[4909]: E1128 16:12:01.901249 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:01 crc kubenswrapper[4909]: E1128 16:12:01.901331 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:01 crc kubenswrapper[4909]: E1128 16:12:01.901972 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.974166 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.974221 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.974238 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.974261 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4909]: I1128 16:12:01.974277 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.077917 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.077987 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.078010 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.078049 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.078071 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.180582 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.180638 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.180695 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.180727 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.180749 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.283689 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.284220 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.284415 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.284565 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.284910 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.389066 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.389135 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.389159 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.389189 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.389233 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.491847 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.491890 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.491898 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.491913 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.491926 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.593818 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.593850 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.593862 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.593876 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.593888 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.696983 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.697030 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.697039 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.697053 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.697063 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.800480 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.800537 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.800556 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.800579 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.800597 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.901814 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2"
Nov 28 16:12:02 crc kubenswrapper[4909]: E1128 16:12:02.902962 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.905619 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.906266 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.906293 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.906323 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4909]: I1128 16:12:02.906347 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.008976 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.009030 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.009050 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.009073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.009090 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.112386 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.112438 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.112467 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.112512 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.112535 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.215452 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.215533 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.215567 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.215586 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.215598 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.318623 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.318726 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.318745 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.318770 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.318788 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.421632 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.421724 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.421740 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.421763 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.421777 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.524933 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.525019 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.525043 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.525073 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.525097 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.632269 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.632329 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.632346 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.632369 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.632387 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.691358 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.691529 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.691552 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.691577 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.691598 4909 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.755742 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"]
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.756150 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.759777 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.760483 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.760573 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.761771 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.817027 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=5.816994089 podStartE2EDuration="5.816994089s" podCreationTimestamp="2025-11-28 16:11:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:03.816166505 +0000 UTC m=+106.212851049" watchObservedRunningTime="2025-11-28 16:12:03.816994089 +0000 UTC m=+106.213678663"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.876981 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e56ab9db-840d-490c-9ba3-c546f405735b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.877062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.877144 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.877181 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e56ab9db-840d-490c-9ba3-c546f405735b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q"
Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.877213 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e56ab9db-840d-490c-9ba3-c546f405735b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID:
\"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.901191 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.901200 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.901161 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:03 crc kubenswrapper[4909]: E1128 16:12:03.901472 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:03 crc kubenswrapper[4909]: E1128 16:12:03.901909 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:03 crc kubenswrapper[4909]: E1128 16:12:03.902041 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978651 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978753 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e56ab9db-840d-490c-9ba3-c546f405735b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e56ab9db-840d-490c-9ba3-c546f405735b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978878 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e56ab9db-840d-490c-9ba3-c546f405735b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978920 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978927 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.978979 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e56ab9db-840d-490c-9ba3-c546f405735b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.982008 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e56ab9db-840d-490c-9ba3-c546f405735b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 
28 16:12:03 crc kubenswrapper[4909]: I1128 16:12:03.990293 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e56ab9db-840d-490c-9ba3-c546f405735b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.009049 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e56ab9db-840d-490c-9ba3-c546f405735b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c888q\" (UID: \"e56ab9db-840d-490c-9ba3-c546f405735b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.075410 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.854626 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" event={"ID":"e56ab9db-840d-490c-9ba3-c546f405735b","Type":"ContainerStarted","Data":"56c3ec8124ccfbe43743867cea93f4246bc00101060eaa7b0978cac9ab94bc46"} Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.855136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" event={"ID":"e56ab9db-840d-490c-9ba3-c546f405735b","Type":"ContainerStarted","Data":"e51396c198fa5ea1ca94ecda755abc7b4d865a7ec68d794b863709782be6ae0e"} Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.877883 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c888q" podStartSLOduration=87.877859168 podStartE2EDuration="1m27.877859168s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:04.877433115 +0000 UTC m=+107.274117669" watchObservedRunningTime="2025-11-28 16:12:04.877859168 +0000 UTC m=+107.274543722" Nov 28 16:12:04 crc kubenswrapper[4909]: I1128 16:12:04.900957 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:04 crc kubenswrapper[4909]: E1128 16:12:04.901221 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:05 crc kubenswrapper[4909]: I1128 16:12:05.901214 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:05 crc kubenswrapper[4909]: I1128 16:12:05.901343 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:05 crc kubenswrapper[4909]: E1128 16:12:05.901430 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:05 crc kubenswrapper[4909]: I1128 16:12:05.901460 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:05 crc kubenswrapper[4909]: E1128 16:12:05.901811 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:05 crc kubenswrapper[4909]: E1128 16:12:05.902190 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:06 crc kubenswrapper[4909]: I1128 16:12:06.900472 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:06 crc kubenswrapper[4909]: E1128 16:12:06.900680 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:07 crc kubenswrapper[4909]: I1128 16:12:07.901605 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:07 crc kubenswrapper[4909]: I1128 16:12:07.901713 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:07 crc kubenswrapper[4909]: I1128 16:12:07.903642 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:07 crc kubenswrapper[4909]: E1128 16:12:07.903860 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:07 crc kubenswrapper[4909]: E1128 16:12:07.903977 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:07 crc kubenswrapper[4909]: E1128 16:12:07.904123 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:08 crc kubenswrapper[4909]: I1128 16:12:08.901362 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:08 crc kubenswrapper[4909]: E1128 16:12:08.901498 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:09 crc kubenswrapper[4909]: I1128 16:12:09.900967 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:09 crc kubenswrapper[4909]: E1128 16:12:09.901479 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:09 crc kubenswrapper[4909]: I1128 16:12:09.901980 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:09 crc kubenswrapper[4909]: E1128 16:12:09.902176 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:09 crc kubenswrapper[4909]: I1128 16:12:09.902230 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:09 crc kubenswrapper[4909]: E1128 16:12:09.902366 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:10 crc kubenswrapper[4909]: I1128 16:12:10.901352 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:10 crc kubenswrapper[4909]: E1128 16:12:10.901548 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.883726 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/1.log" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.884183 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/0.log" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.884228 4909 generic.go:334] "Generic (PLEG): container finished" podID="6e3805b2-8ad3-4fa6-b88f-e0ae42294202" containerID="e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a" exitCode=1 Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.884257 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerDied","Data":"e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a"} Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.884289 4909 scope.go:117] "RemoveContainer" containerID="0777b571ce4049338437a97264761c89ab7517b4da8400edcd3381d58aef32e4" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.884673 4909 scope.go:117] "RemoveContainer" containerID="e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a" Nov 28 16:12:11 crc kubenswrapper[4909]: E1128 16:12:11.884863 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-wx2jj_openshift-multus(6e3805b2-8ad3-4fa6-b88f-e0ae42294202)\"" pod="openshift-multus/multus-wx2jj" podUID="6e3805b2-8ad3-4fa6-b88f-e0ae42294202" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.901240 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:11 crc kubenswrapper[4909]: E1128 16:12:11.901417 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.901731 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:11 crc kubenswrapper[4909]: E1128 16:12:11.901827 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:11 crc kubenswrapper[4909]: I1128 16:12:11.902121 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:11 crc kubenswrapper[4909]: E1128 16:12:11.902212 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:12 crc kubenswrapper[4909]: I1128 16:12:12.890012 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/1.log" Nov 28 16:12:12 crc kubenswrapper[4909]: I1128 16:12:12.901405 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:12 crc kubenswrapper[4909]: E1128 16:12:12.901583 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:12 crc kubenswrapper[4909]: I1128 16:12:12.902547 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.861196 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-8rjn2"] Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.898440 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/3.log" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.900459 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.900532 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.900463 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:13 crc kubenswrapper[4909]: E1128 16:12:13.900621 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:13 crc kubenswrapper[4909]: E1128 16:12:13.900767 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:13 crc kubenswrapper[4909]: E1128 16:12:13.900877 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.903335 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:13 crc kubenswrapper[4909]: E1128 16:12:13.903486 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.907385 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerStarted","Data":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.908094 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:12:13 crc kubenswrapper[4909]: I1128 16:12:13.949222 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podStartSLOduration=96.949196813 podStartE2EDuration="1m36.949196813s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:13.947946997 +0000 UTC m=+116.344631561" watchObservedRunningTime="2025-11-28 16:12:13.949196813 +0000 UTC m=+116.345881377" Nov 28 16:12:15 crc kubenswrapper[4909]: I1128 16:12:15.901135 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:15 crc kubenswrapper[4909]: I1128 16:12:15.901151 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:15 crc kubenswrapper[4909]: I1128 16:12:15.901163 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:15 crc kubenswrapper[4909]: I1128 16:12:15.901201 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:15 crc kubenswrapper[4909]: E1128 16:12:15.902115 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:15 crc kubenswrapper[4909]: E1128 16:12:15.902367 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:15 crc kubenswrapper[4909]: E1128 16:12:15.902481 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:15 crc kubenswrapper[4909]: E1128 16:12:15.902533 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:17 crc kubenswrapper[4909]: E1128 16:12:17.886032 4909 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 16:12:17 crc kubenswrapper[4909]: I1128 16:12:17.900540 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:17 crc kubenswrapper[4909]: I1128 16:12:17.900740 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:17 crc kubenswrapper[4909]: E1128 16:12:17.902445 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:17 crc kubenswrapper[4909]: I1128 16:12:17.902490 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:17 crc kubenswrapper[4909]: I1128 16:12:17.902538 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:17 crc kubenswrapper[4909]: E1128 16:12:17.902743 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:17 crc kubenswrapper[4909]: E1128 16:12:17.902857 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:17 crc kubenswrapper[4909]: E1128 16:12:17.902981 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:18 crc kubenswrapper[4909]: E1128 16:12:18.004353 4909 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 16:12:18 crc kubenswrapper[4909]: I1128 16:12:18.477219 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:12:19 crc kubenswrapper[4909]: I1128 16:12:19.901536 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:19 crc kubenswrapper[4909]: I1128 16:12:19.901554 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:19 crc kubenswrapper[4909]: I1128 16:12:19.901739 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:19 crc kubenswrapper[4909]: I1128 16:12:19.901775 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:19 crc kubenswrapper[4909]: E1128 16:12:19.903308 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:19 crc kubenswrapper[4909]: E1128 16:12:19.902972 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:19 crc kubenswrapper[4909]: E1128 16:12:19.903346 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:19 crc kubenswrapper[4909]: E1128 16:12:19.903375 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:21 crc kubenswrapper[4909]: I1128 16:12:21.901855 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:21 crc kubenswrapper[4909]: I1128 16:12:21.901938 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:21 crc kubenswrapper[4909]: E1128 16:12:21.902111 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:21 crc kubenswrapper[4909]: E1128 16:12:21.902202 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:21 crc kubenswrapper[4909]: I1128 16:12:21.902590 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:21 crc kubenswrapper[4909]: I1128 16:12:21.902602 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:21 crc kubenswrapper[4909]: E1128 16:12:21.903012 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:21 crc kubenswrapper[4909]: E1128 16:12:21.903384 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:23 crc kubenswrapper[4909]: E1128 16:12:23.005391 4909 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 16:12:23 crc kubenswrapper[4909]: I1128 16:12:23.900852 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:23 crc kubenswrapper[4909]: I1128 16:12:23.900923 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:23 crc kubenswrapper[4909]: E1128 16:12:23.901463 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:23 crc kubenswrapper[4909]: I1128 16:12:23.901023 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:23 crc kubenswrapper[4909]: I1128 16:12:23.900955 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:23 crc kubenswrapper[4909]: E1128 16:12:23.901643 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:23 crc kubenswrapper[4909]: E1128 16:12:23.901763 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:23 crc kubenswrapper[4909]: E1128 16:12:23.901831 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:25 crc kubenswrapper[4909]: I1128 16:12:25.901575 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:25 crc kubenswrapper[4909]: I1128 16:12:25.901649 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:25 crc kubenswrapper[4909]: I1128 16:12:25.901598 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:25 crc kubenswrapper[4909]: I1128 16:12:25.901810 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:25 crc kubenswrapper[4909]: E1128 16:12:25.901887 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:25 crc kubenswrapper[4909]: E1128 16:12:25.902007 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:25 crc kubenswrapper[4909]: E1128 16:12:25.902163 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:25 crc kubenswrapper[4909]: E1128 16:12:25.902289 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:26 crc kubenswrapper[4909]: I1128 16:12:26.901947 4909 scope.go:117] "RemoveContainer" containerID="e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.901076 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.901145 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.901180 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:27 crc kubenswrapper[4909]: E1128 16:12:27.902560 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.902601 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:27 crc kubenswrapper[4909]: E1128 16:12:27.902744 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:27 crc kubenswrapper[4909]: E1128 16:12:27.902853 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:27 crc kubenswrapper[4909]: E1128 16:12:27.903015 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.957206 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/1.log" Nov 28 16:12:27 crc kubenswrapper[4909]: I1128 16:12:27.957308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerStarted","Data":"ff55a4f30c31edf7245bab6cba501e9e5bde33dd3277e5a9e39f85eb66c216aa"} Nov 28 16:12:28 crc kubenswrapper[4909]: E1128 16:12:28.007035 4909 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 16:12:29 crc kubenswrapper[4909]: I1128 16:12:29.901918 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:29 crc kubenswrapper[4909]: E1128 16:12:29.902122 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:29 crc kubenswrapper[4909]: I1128 16:12:29.902395 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:29 crc kubenswrapper[4909]: E1128 16:12:29.902485 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:29 crc kubenswrapper[4909]: I1128 16:12:29.902721 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:29 crc kubenswrapper[4909]: E1128 16:12:29.902823 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:29 crc kubenswrapper[4909]: I1128 16:12:29.903204 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:29 crc kubenswrapper[4909]: E1128 16:12:29.903362 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:31 crc kubenswrapper[4909]: I1128 16:12:31.900885 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:31 crc kubenswrapper[4909]: I1128 16:12:31.901027 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:31 crc kubenswrapper[4909]: I1128 16:12:31.901059 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:31 crc kubenswrapper[4909]: E1128 16:12:31.901372 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:31 crc kubenswrapper[4909]: E1128 16:12:31.901447 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:31 crc kubenswrapper[4909]: E1128 16:12:31.901642 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8rjn2" podUID="ffceca0e-d9b5-484f-8753-5e0269eec811" Nov 28 16:12:31 crc kubenswrapper[4909]: I1128 16:12:31.901880 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:31 crc kubenswrapper[4909]: E1128 16:12:31.902054 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.900789 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.900830 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.900909 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.900789 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.905178 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.905794 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.905818 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.905877 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.906052 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 16:12:33 crc kubenswrapper[4909]: I1128 16:12:33.906060 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.336425 4909 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.396055 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.398085 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ld6fg"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.398240 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.417225 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.417452 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.421546 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.430361 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.432402 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.432932 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.433506 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.433988 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.434487 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.434526 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.435076 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.435143 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.435394 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.435507 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.436076 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.436107 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.436735 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.437211 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.437922 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.438457 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.439389 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.439520 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.439938 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.440426 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.443116 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.443173 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.444858 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.444921 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.445194 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.445458 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.445734 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.446341 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.446931 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.449754 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.449953 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450396 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450500 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450684 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450723 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450809 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450826 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450920 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.450976 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.451034 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.451150 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.451817 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.451935 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.453439 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.453855 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.453895 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.453972 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.454039 4909 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.454066 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.454148 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.454351 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.454593 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.455292 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.457704 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mdb95"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.458092 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b4wbn"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459111 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459605 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459733 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459765 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459922 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460038 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460250 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460358 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460497 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460605 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.460842 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.459741 4909 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.462175 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.462204 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.462282 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.462398 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.462563 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.470329 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.470503 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.470582 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.470680 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.471335 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.472998 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-l2xpj"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.473463 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.473599 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.473848 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.474600 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.475343 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.475797 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.476270 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.479300 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.480535 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7xr8n"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.481264 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.481590 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.482072 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.482167 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.482374 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.482461 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.497198 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.499482 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.501413 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.502110 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.516383 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.517086 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.518801 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.519094 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.519981 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.520274 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.520508 4909 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.520894 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.521244 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.521367 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.521465 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.521974 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522123 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522334 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522449 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522546 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522993 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.523153 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.522485 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.525144 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.525890 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.526154 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.526630 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.527286 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.527906 
4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.528248 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.529205 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.529426 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.529816 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.530184 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.530527 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.531102 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nlb6g"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.531507 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.532089 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.532141 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.532902 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.533451 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.533631 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.533911 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.534955 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.534966 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.538032 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540203 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540861 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540906 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540929 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540949 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540968 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7snv\" (UniqueName: \"kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.540986 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541002 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541045 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541065 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-serving-cert\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541113 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541130 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc6ws\" (UniqueName: \"kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541066 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.541573 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-encryption-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542224 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542270 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542288 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542388 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542412 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542846 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2zzq\" (UniqueName: \"kubernetes.io/projected/0120f632-f92f-4cdc-a4c5-0d63471be0ef-kube-api-access-h2zzq\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542907 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit-dir\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.542977 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543002 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-auth-proxy-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543047 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-serving-cert\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543070 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-node-pullsecrets\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543146 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hbkn\" (UniqueName: \"kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543228 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543255 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-serving-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543325 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543388 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543413 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h449v\" (UniqueName: \"kubernetes.io/projected/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-kube-api-access-h449v\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.543483 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.545903 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfzz6\" (UniqueName: \"kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.545953 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rndzv\" (UniqueName: \"kubernetes.io/projected/f955b72b-dc83-4219-b488-163acebcf367-kube-api-access-rndzv\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.545975 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.545991 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f955b72b-dc83-4219-b488-163acebcf367-machine-approver-tls\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546016 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-policies\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546040 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546055 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546071 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546089 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546961 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-client\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546978 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-image-import-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.546994 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547013 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6r6p\" (UniqueName: \"kubernetes.io/projected/5c7e3790-aa06-4b95-94a7-c41d909ec984-kube-api-access-g6r6p\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547032 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547086 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547102 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-dir\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547140 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547219 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547242 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547258 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-encryption-config\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547293 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547313 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template\") pod 
\"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.547329 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-client\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.550224 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.555675 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.556344 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.557235 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7zdl4"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.557951 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.559196 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.559721 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.561328 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.561898 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.562157 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.563113 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.568853 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.569303 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.570321 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.570715 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.572188 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.581452 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9h22n"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.590943 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.592099 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.592172 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-g6mgs"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.592101 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.592815 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.592999 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.606783 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ld6fg"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.608285 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.608797 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.609562 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.609694 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qbnrs"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.610363 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.613601 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.614000 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.614556 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mdb95"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.614610 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.615066 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.617782 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.620125 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.620604 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b4wbn"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.620617 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bp2ss"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.621183 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.621546 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.625155 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.625173 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7zdl4"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.625183 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.628671 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.628688 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.632262 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.632278 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.632288 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.635763 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.635782 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.635793 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jcpqp"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.636342 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.640175 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.640194 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7xr8n"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.640204 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.643786 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-l2xpj"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.643805 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.643816 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.647072 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xz6qx"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.647744 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rwfb8"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.648033 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.648081 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.648388 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.651508 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.651524 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.651534 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9h22n"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.654557 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.654573 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.654582 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nlb6g"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.657645 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.657678 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.657689 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bp2ss"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.661240 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.661257 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.661267 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qbnrs"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.664841 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xz6qx"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.664857 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jcpqp"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.664867 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.667242 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.667279 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rwfb8"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.672188 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.672589 4909 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679561 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2zzq\" (UniqueName: \"kubernetes.io/projected/0120f632-f92f-4cdc-a4c5-0d63471be0ef-kube-api-access-h2zzq\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679611 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit-dir\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679640 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679689 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-auth-proxy-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679707 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-serving-cert\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679723 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-node-pullsecrets\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679744 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hbkn\" (UniqueName: \"kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679779 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679807 
4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-serving-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679838 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679902 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h449v\" (UniqueName: \"kubernetes.io/projected/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-kube-api-access-h449v\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679932 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-webhook-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679968 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.679998 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfzz6\" (UniqueName: \"kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680033 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rndzv\" (UniqueName: \"kubernetes.io/projected/f955b72b-dc83-4219-b488-163acebcf367-kube-api-access-rndzv\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680066 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/f955b72b-dc83-4219-b488-163acebcf367-machine-approver-tls\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680090 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680117 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-policies\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680139 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680156 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680175 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680193 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680228 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-client\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680244 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-image-import-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680296 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6r6p\" (UniqueName: \"kubernetes.io/projected/5c7e3790-aa06-4b95-94a7-c41d909ec984-kube-api-access-g6r6p\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680332 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit-dir\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680350 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680374 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680402 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-dir\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680416 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c7e3790-aa06-4b95-94a7-c41d909ec984-node-pullsecrets\") pod \"apiserver-76f77b778f-ld6fg\" (UID: 
\"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680427 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680604 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680637 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680728 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680750 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-encryption-config\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680771 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-client\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680796 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680820 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-tmpfs\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680852 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680881 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7snv\" (UniqueName: \"kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680903 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680928 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680974 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680994 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681012 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") 
" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681059 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681078 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-serving-cert\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681095 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l6nt\" (UniqueName: \"kubernetes.io/projected/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-kube-api-access-6l6nt\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681121 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681138 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc6ws\" (UniqueName: \"kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681154 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681172 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-apiservice-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681190 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-encryption-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681208 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681227 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681244 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681291 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681617 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681823 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-serving-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.681984 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.682345 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.682487 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-audit\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.682537 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config\") pod 
\"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.682551 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.680531 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-auth-proxy-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.684227 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.684279 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.684599 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-image-import-ca\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686217 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686434 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686486 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-policies\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686872 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-client\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.687116 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686897 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-etcd-client\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.686881 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-serving-cert\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.687370 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.688584 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c7e3790-aa06-4b95-94a7-c41d909ec984-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.688940 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f955b72b-dc83-4219-b488-163acebcf367-config\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.689336 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.690102 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.690324 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-h9mgh"] Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.691587 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.692082 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5c7e3790-aa06-4b95-94a7-c41d909ec984-encryption-config\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.692541 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.692850 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.693239 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.693249 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-encryption-config\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.693334 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0120f632-f92f-4cdc-a4c5-0d63471be0ef-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.693911 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.693922 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.694071 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.694535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.694895 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.694938 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.695037 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0120f632-f92f-4cdc-a4c5-0d63471be0ef-audit-dir\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.696312 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.696358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0120f632-f92f-4cdc-a4c5-0d63471be0ef-serving-cert\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.696530 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.696777 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc 
kubenswrapper[4909]: I1128 16:12:34.697723 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.699058 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.699306 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.699970 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.700550 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.700667 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f955b72b-dc83-4219-b488-163acebcf367-machine-approver-tls\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.702418 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.709403 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.729089 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.755396 4909 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"trusted-ca" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.769770 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.782252 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l6nt\" (UniqueName: \"kubernetes.io/projected/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-kube-api-access-6l6nt\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.782295 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-apiservice-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.782347 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-webhook-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.782410 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-tmpfs\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.783196 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-tmpfs\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.788693 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.808720 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.828218 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.849923 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.869147 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.888749 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.908683 4909 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-authentication-operator"/"serving-cert" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.929685 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.949044 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.968218 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 16:12:34 crc kubenswrapper[4909]: I1128 16:12:34.998803 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.010410 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.019344 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-apiservice-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.019403 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-webhook-cert\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.050071 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.070406 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.090384 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.109875 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.130393 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.150322 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.169897 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.190064 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.211440 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 
16:12:35.229938 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.264916 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.268514 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.289297 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.309813 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.329427 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.350399 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.370197 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.389350 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.410789 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.429621 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.449101 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.470322 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.490256 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.509490 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.528948 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.550159 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.569413 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.589119 4909 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.607759 4909 request.go:700] Waited for 1.014694239s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.610090 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.629004 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.649723 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.670250 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.690357 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.709563 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.730111 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.749491 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.770379 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.788821 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.809906 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.829619 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.850194 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.870806 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.889531 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.908177 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.929652 4909 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.949064 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.981496 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 16:12:35 crc kubenswrapper[4909]: I1128 16:12:35.990137 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.009768 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.030059 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.050286 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.069599 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.089326 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.109572 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.130634 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.148853 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.170220 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.189421 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.209250 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.229029 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.249298 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.269772 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.289583 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.309567 4909 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.328875 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.349362 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.369458 4909 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.390262 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.447962 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2zzq\" (UniqueName: \"kubernetes.io/projected/0120f632-f92f-4cdc-a4c5-0d63471be0ef-kube-api-access-h2zzq\") pod \"apiserver-7bbb656c7d-zqhkp\" (UID: \"0120f632-f92f-4cdc-a4c5-0d63471be0ef\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.470343 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hbkn\" (UniqueName: \"kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn\") pod \"oauth-openshift-558db77b4-t67vb\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.493962 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6r6p\" (UniqueName: \"kubernetes.io/projected/5c7e3790-aa06-4b95-94a7-c41d909ec984-kube-api-access-g6r6p\") pod \"apiserver-76f77b778f-ld6fg\" (UID: \"5c7e3790-aa06-4b95-94a7-c41d909ec984\") " pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.508477 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.510138 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7snv\" (UniqueName: \"kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv\") pod \"controller-manager-879f6c89f-5x9zh\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.543125 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h449v\" (UniqueName: \"kubernetes.io/projected/aeb1d3c3-35d9-4d08-8026-9faa832a32f0-kube-api-access-h449v\") pod \"openshift-apiserver-operator-796bbdcf4f-dvcwf\" (UID: \"aeb1d3c3-35d9-4d08-8026-9faa832a32f0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.550190 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.552222 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.568863 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.573034 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.617277 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc6ws\" (UniqueName: \"kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws\") pod \"route-controller-manager-6576b87f9c-c7bds\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.627610 4909 request.go:700] Waited for 1.932552549s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.630986 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.638749 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rndzv\" (UniqueName: \"kubernetes.io/projected/f955b72b-dc83-4219-b488-163acebcf367-kube-api-access-rndzv\") pod \"machine-approver-56656f9798-k67zs\" (UID: \"f955b72b-dc83-4219-b488-163acebcf367\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.639072 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.655984 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.674713 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfzz6\" (UniqueName: \"kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6\") pod \"console-f9d7485db-qjf9t\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") " pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.683972 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l6nt\" (UniqueName: \"kubernetes.io/projected/f15e54ab-209a-490e-a93a-4c86fe1b6b2b-kube-api-access-6l6nt\") pod \"packageserver-d55dfcdfc-x4wv5\" (UID: \"f15e54ab-209a-490e-a93a-4c86fe1b6b2b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.686383 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.707835 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.707902 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-config\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.707954 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xzdg\" (UniqueName: \"kubernetes.io/projected/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-kube-api-access-4xzdg\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708005 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708050 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708135 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d229415a-fd7f-4553-9752-c6c401709252-config\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708182 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708228 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-samples-operator-tls\") 
pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708350 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ba89323c-1e8c-4731-85d4-8b07766e3609-metrics-tls\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708390 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtdg6\" (UniqueName: \"kubernetes.io/projected/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-kube-api-access-wtdg6\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708428 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708581 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-service-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708689 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708814 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khgx9\" (UniqueName: \"kubernetes.io/projected/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-kube-api-access-khgx9\") pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708897 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-serving-cert\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.708970 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43c2b623-501b-489b-b5d5-3dbe0d23d265-serving-cert\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709042 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709089 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-trusted-ca\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709132 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-srv-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709175 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-serving-cert\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709218 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szctj\" (UniqueName: \"kubernetes.io/projected/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-kube-api-access-szctj\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709468 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709508 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacec508-dd9f-438d-bb9e-d408e73f0d05-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709543 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-serving-cert\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709607 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709641 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/aacec508-dd9f-438d-bb9e-d408e73f0d05-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709704 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfgm8\" (UniqueName: \"kubernetes.io/projected/03717037-d782-4ec7-bc3d-845c4b455107-kube-api-access-qfgm8\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709735 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-client\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709897 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-config\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.709950 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d229415a-fd7f-4553-9752-c6c401709252-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710154 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvh4n\" (UniqueName: 
\"kubernetes.io/projected/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-kube-api-access-lvh4n\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710258 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710340 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mjmj\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710374 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs75n\" (UniqueName: \"kubernetes.io/projected/ba89323c-1e8c-4731-85d4-8b07766e3609-kube-api-access-fs75n\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710406 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710505 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlcg8\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-kube-api-access-dlcg8\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710714 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldtbz\" (UniqueName: \"kubernetes.io/projected/e4c200b0-224e-4a58-af74-96a8568ddb3d-kube-api-access-ldtbz\") pod \"downloads-7954f5f757-l2xpj\" (UID: \"e4c200b0-224e-4a58-af74-96a8568ddb3d\") " pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710753 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84lcp\" (UniqueName: \"kubernetes.io/projected/43c2b623-501b-489b-b5d5-3dbe0d23d265-kube-api-access-84lcp\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710782 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-config\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710841 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710881 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-config\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.710997 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.711025 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-serving-cert\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.711150 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43c2b623-501b-489b-b5d5-3dbe0d23d265-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.713796 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.713841 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d229415a-fd7f-4553-9752-c6c401709252-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: E1128 16:12:36.715151 4909 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.215134705 +0000 UTC m=+139.611819239 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.814938 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4909]: E1128 16:12:36.815096 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.315067664 +0000 UTC m=+139.711752208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815184 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815238 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815260 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-config\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815279 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xzdg\" (UniqueName: \"kubernetes.io/projected/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-kube-api-access-4xzdg\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: 
\"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815316 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lq6w2\" (UniqueName: \"kubernetes.io/projected/eeee7a7e-bba7-4704-984d-d18e9813a7ca-kube-api-access-lq6w2\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815333 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815348 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815381 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d229415a-fd7f-4553-9752-c6c401709252-config\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815401 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815417 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n58dg\" (UniqueName: \"kubernetes.io/projected/562fb423-75b3-4716-9370-bc090c7fbbee-kube-api-access-n58dg\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815437 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815477 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ba89323c-1e8c-4731-85d4-8b07766e3609-metrics-tls\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 
28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815504 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtdg6\" (UniqueName: \"kubernetes.io/projected/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-kube-api-access-wtdg6\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.815540 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817177 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817210 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-key\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817240 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817266 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2ntx\" (UniqueName: \"kubernetes.io/projected/3d9c07f9-d9e4-45b0-8567-49936f18f930-kube-api-access-x2ntx\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817285 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39ad8798-c58e-4c22-b31c-2eb95b257309-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817307 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-mountpoint-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817330 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-serving-cert\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817355 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43c2b623-501b-489b-b5d5-3dbe0d23d265-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817386 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-srv-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817407 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-serving-cert\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817430 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szctj\" (UniqueName: \"kubernetes.io/projected/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-kube-api-access-szctj\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817448 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps8mt\" (UniqueName: \"kubernetes.io/projected/40eb22b8-4c66-4dee-a689-e41144bd9f3e-kube-api-access-ps8mt\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817472 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817494 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/76a8915d-4ebe-4679-be00-6b290832b9cb-proxy-tls\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817526 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacec508-dd9f-438d-bb9e-d408e73f0d05-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817552 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwf69\" (UniqueName: \"kubernetes.io/projected/76a8915d-4ebe-4679-be00-6b290832b9cb-kube-api-access-lwf69\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817583 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/aacec508-dd9f-438d-bb9e-d408e73f0d05-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817607 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-client\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817633 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/40eb22b8-4c66-4dee-a689-e41144bd9f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817689 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/42d7dcab-41fb-48a8-a08b-f4fca523e27b-metrics-tls\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817706 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d7dcab-41fb-48a8-a08b-f4fca523e27b-trusted-ca\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817741 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b7310f1-adae-43c2-ab22-53385dabd116-cert\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817763 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-config\") pod 
\"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817782 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/562fb423-75b3-4716-9370-bc090c7fbbee-metrics-tls\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817804 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-node-bootstrap-token\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817834 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/403639b0-0067-40f7-bf1c-fcddd8320e62-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817858 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817875 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817900 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mjmj\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817922 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs75n\" (UniqueName: \"kubernetes.io/projected/ba89323c-1e8c-4731-85d4-8b07766e3609-kube-api-access-fs75n\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817944 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: 
\"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817969 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djb5c\" (UniqueName: \"kubernetes.io/projected/c73706cd-a7f9-436c-88a9-0fa3120649aa-kube-api-access-djb5c\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.817989 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-socket-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818164 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-config\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818190 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818216 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-images\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818239 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-config\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818241 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818316 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/74a58b61-df24-4f27-8382-ff175aa2bb14-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.818292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.819939 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4gr\" (UniqueName: \"kubernetes.io/projected/05f40c85-8360-453c-80d9-907bb6db611b-kube-api-access-np4gr\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.820073 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-certs\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.820121 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43c2b623-501b-489b-b5d5-3dbe0d23d265-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.820159 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d229415a-fd7f-4553-9752-c6c401709252-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.820225 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.820465 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-config\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.821527 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d229415a-fd7f-4553-9752-c6c401709252-config\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.822217 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.822915 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.823070 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-serving-cert\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.823754 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-config\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.824484 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.824726 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacec508-dd9f-438d-bb9e-d408e73f0d05-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.826164 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ba89323c-1e8c-4731-85d4-8b07766e3609-metrics-tls\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.827210 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"] Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.833364 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.834185 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.837337 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-client\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.839808 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/562fb423-75b3-4716-9370-bc090c7fbbee-config-volume\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.839881 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-service-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-service-ca-bundle\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840407 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-metrics-certs\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840427 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c07f9-d9e4-45b0-8567-49936f18f930-config\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840450 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9xdg\" (UniqueName: 
\"kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.840763 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c73706cd-a7f9-436c-88a9-0fa3120649aa-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.841271 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.841314 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-etcd-service-ca\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.841423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43c2b623-501b-489b-b5d5-3dbe0d23d265-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.845323 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43c2b623-501b-489b-b5d5-3dbe0d23d265-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.845576 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/aacec508-dd9f-438d-bb9e-d408e73f0d05-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.845594 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d229415a-fd7f-4553-9752-c6c401709252-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.848533 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-serving-cert\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.853926 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.854110 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khgx9\" (UniqueName: \"kubernetes.io/projected/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-kube-api-access-khgx9\") pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.854136 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-cabundle\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.854515 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/403639b0-0067-40f7-bf1c-fcddd8320e62-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.854563 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-profile-collector-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.854582 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-registration-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.855172 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.855512 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ld6fg"] Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.857487 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-trusted-ca\") pod \"console-operator-58897d9998-b4wbn\" (UID: 
\"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.855383 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-trusted-ca\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858149 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-auth-proxy-config\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858204 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858236 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d5l8\" (UniqueName: \"kubernetes.io/projected/74a58b61-df24-4f27-8382-ff175aa2bb14-kube-api-access-4d5l8\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858263 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbzgq\" (UniqueName: \"kubernetes.io/projected/04ec2320-667b-4f35-976e-60c4a1eb3386-kube-api-access-cbzgq\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858290 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfztf\" (UniqueName: \"kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858329 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/403639b0-0067-40f7-bf1c-fcddd8320e62-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858360 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn5s7\" (UniqueName: 
\"kubernetes.io/projected/f7ab1edf-70d0-4894-a7b1-ec68413d9b45-kube-api-access-mn5s7\") pod \"migrator-59844c95c7-4j2wt\" (UID: \"f7ab1edf-70d0-4894-a7b1-ec68413d9b45\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858412 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858442 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-serving-cert\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858466 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz845\" (UniqueName: \"kubernetes.io/projected/4262b1b7-b141-48fd-ad22-8827154ba68a-kube-api-access-sz845\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858491 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858516 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfgm8\" (UniqueName: \"kubernetes.io/projected/03717037-d782-4ec7-bc3d-845c4b455107-kube-api-access-qfgm8\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858540 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-config\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858560 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/40eb22b8-4c66-4dee-a689-e41144bd9f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858579 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-default-certificate\") pod 
\"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858601 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z54pw\" (UniqueName: \"kubernetes.io/projected/6b7310f1-adae-43c2-ab22-53385dabd116-kube-api-access-z54pw\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858639 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2t7k\" (UniqueName: \"kubernetes.io/projected/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-kube-api-access-b2t7k\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858695 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d229415a-fd7f-4553-9752-c6c401709252-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.858720 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ad8798-c58e-4c22-b31c-2eb95b257309-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.865799 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-plugins-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.865816 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.865828 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4qwq\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-kube-api-access-k4qwq\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.865905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvh4n\" (UniqueName: \"kubernetes.io/projected/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-kube-api-access-lvh4n\") pod \"console-operator-58897d9998-b4wbn\" (UID: 
\"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.865954 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d9c07f9-d9e4-45b0-8567-49936f18f930-serving-cert\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866094 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-stats-auth\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866122 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-images\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866155 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlcg8\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-kube-api-access-dlcg8\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866173 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5brq7\" (UniqueName: \"kubernetes.io/projected/39ad8798-c58e-4c22-b31c-2eb95b257309-kube-api-access-5brq7\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866205 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldtbz\" (UniqueName: \"kubernetes.io/projected/e4c200b0-224e-4a58-af74-96a8568ddb3d-kube-api-access-ldtbz\") pod \"downloads-7954f5f757-l2xpj\" (UID: \"e4c200b0-224e-4a58-af74-96a8568ddb3d\") " pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866221 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84lcp\" (UniqueName: \"kubernetes.io/projected/43c2b623-501b-489b-b5d5-3dbe0d23d265-kube-api-access-84lcp\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866255 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-config\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: 
\"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866295 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebbacd25-74d0-422d-a0b1-51bb64a57468-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: \"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866319 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-srv-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866351 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866368 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdph5\" (UniqueName: \"kubernetes.io/projected/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-kube-api-access-gdph5\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866635 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.866754 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-config\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.867489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-serving-cert\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.867714 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-596z9\" (UniqueName: \"kubernetes.io/projected/ebbacd25-74d0-422d-a0b1-51bb64a57468-kube-api-access-596z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: 
\"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.867761 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-csi-data-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.867766 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.868417 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-serving-cert\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.868991 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.869298 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03717037-d782-4ec7-bc3d-845c4b455107-srv-cert\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.870187 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-serving-cert\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.870846 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtdg6\" (UniqueName: \"kubernetes.io/projected/b6bb0f55-f1dd-4a71-b917-5f9b7209478e-kube-api-access-wtdg6\") pod \"openshift-config-operator-7777fb866f-mg8mw\" (UID: \"b6bb0f55-f1dd-4a71-b917-5f9b7209478e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: E1128 16:12:36.871455 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.371422087 +0000 UTC m=+139.768106611 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.874004 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.883764 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xzdg\" (UniqueName: \"kubernetes.io/projected/c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c-kube-api-access-4xzdg\") pod \"authentication-operator-69f744f599-nlb6g\" (UID: \"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.894543 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.901433 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs75n\" (UniqueName: \"kubernetes.io/projected/ba89323c-1e8c-4731-85d4-8b07766e3609-kube-api-access-fs75n\") pod \"dns-operator-744455d44c-7xr8n\" (UID: \"ba89323c-1e8c-4731-85d4-8b07766e3609\") " pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.901722 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.904302 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.908080 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"]
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.920939 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mjmj\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.941842 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szctj\" (UniqueName: \"kubernetes.io/projected/355a0053-b22d-4e37-9ad1-ecfbdf391bc2-kube-api-access-szctj\") pod \"etcd-operator-b45778765-mdb95\" (UID: \"355a0053-b22d-4e37-9ad1-ecfbdf391bc2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968553 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:36 crc kubenswrapper[4909]: E1128 16:12:36.968701 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.468678759 +0000 UTC m=+139.865363283 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968751 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/76a8915d-4ebe-4679-be00-6b290832b9cb-proxy-tls\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968785 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/42d7dcab-41fb-48a8-a08b-f4fca523e27b-metrics-tls\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968800 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d7dcab-41fb-48a8-a08b-f4fca523e27b-trusted-ca\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968819 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwf69\" (UniqueName: \"kubernetes.io/projected/76a8915d-4ebe-4679-be00-6b290832b9cb-kube-api-access-lwf69\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968845 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/40eb22b8-4c66-4dee-a689-e41144bd9f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968865 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b7310f1-adae-43c2-ab22-53385dabd116-cert\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968881 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/562fb423-75b3-4716-9370-bc090c7fbbee-metrics-tls\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-node-bootstrap-token\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968942 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/403639b0-0067-40f7-bf1c-fcddd8320e62-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968959 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968973 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djb5c\" (UniqueName: \"kubernetes.io/projected/c73706cd-a7f9-436c-88a9-0fa3120649aa-kube-api-access-djb5c\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.968996 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-images\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969012 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-socket-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969028 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969044 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/74a58b61-df24-4f27-8382-ff175aa2bb14-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969060 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4gr\" (UniqueName: \"kubernetes.io/projected/05f40c85-8360-453c-80d9-907bb6db611b-kube-api-access-np4gr\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969078 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-certs\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969102 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/562fb423-75b3-4716-9370-bc090c7fbbee-config-volume\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969120 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-service-ca-bundle\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969134 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-metrics-certs\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c07f9-d9e4-45b0-8567-49936f18f930-config\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969170 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9xdg\" (UniqueName: \"kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969187 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c73706cd-a7f9-436c-88a9-0fa3120649aa-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969213 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-cabundle\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969230 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/403639b0-0067-40f7-bf1c-fcddd8320e62-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969246 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-profile-collector-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969265 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-registration-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969288 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-auth-proxy-config\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969310 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969328 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d5l8\" (UniqueName: \"kubernetes.io/projected/74a58b61-df24-4f27-8382-ff175aa2bb14-kube-api-access-4d5l8\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969345 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbzgq\" (UniqueName: \"kubernetes.io/projected/04ec2320-667b-4f35-976e-60c4a1eb3386-kube-api-access-cbzgq\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969361 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfztf\" (UniqueName: \"kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969380 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn5s7\" (UniqueName: \"kubernetes.io/projected/f7ab1edf-70d0-4894-a7b1-ec68413d9b45-kube-api-access-mn5s7\") pod \"migrator-59844c95c7-4j2wt\" (UID: \"f7ab1edf-70d0-4894-a7b1-ec68413d9b45\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969397 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/403639b0-0067-40f7-bf1c-fcddd8320e62-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969415 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-config\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969431 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/40eb22b8-4c66-4dee-a689-e41144bd9f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969445 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz845\" (UniqueName: \"kubernetes.io/projected/4262b1b7-b141-48fd-ad22-8827154ba68a-kube-api-access-sz845\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969466 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-default-certificate\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969481 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z54pw\" (UniqueName: \"kubernetes.io/projected/6b7310f1-adae-43c2-ab22-53385dabd116-kube-api-access-z54pw\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969505 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2t7k\" (UniqueName: \"kubernetes.io/projected/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-kube-api-access-b2t7k\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss"
Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969513 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-socket-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") "
pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969525 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ad8798-c58e-4c22-b31c-2eb95b257309-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969612 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-plugins-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969646 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4qwq\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-kube-api-access-k4qwq\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969702 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d9c07f9-d9e4-45b0-8567-49936f18f930-serving-cert\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969714 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d7dcab-41fb-48a8-a08b-f4fca523e27b-trusted-ca\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969731 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-stats-auth\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969757 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-images\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969790 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebbacd25-74d0-422d-a0b1-51bb64a57468-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: \"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969816 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-srv-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969838 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5brq7\" (UniqueName: \"kubernetes.io/projected/39ad8798-c58e-4c22-b31c-2eb95b257309-kube-api-access-5brq7\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969884 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdph5\" (UniqueName: \"kubernetes.io/projected/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-kube-api-access-gdph5\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969925 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-596z9\" (UniqueName: \"kubernetes.io/projected/ebbacd25-74d0-422d-a0b1-51bb64a57468-kube-api-access-596z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: \"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969957 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-csi-data-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.969992 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970023 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n58dg\" (UniqueName: \"kubernetes.io/projected/562fb423-75b3-4716-9370-bc090c7fbbee-kube-api-access-n58dg\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970049 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lq6w2\" (UniqueName: \"kubernetes.io/projected/eeee7a7e-bba7-4704-984d-d18e9813a7ca-kube-api-access-lq6w2\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970056 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39ad8798-c58e-4c22-b31c-2eb95b257309-config\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970089 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970120 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970145 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2ntx\" (UniqueName: \"kubernetes.io/projected/3d9c07f9-d9e4-45b0-8567-49936f18f930-kube-api-access-x2ntx\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970170 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39ad8798-c58e-4c22-b31c-2eb95b257309-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970191 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-key\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970219 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-mountpoint-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970346 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps8mt\" (UniqueName: \"kubernetes.io/projected/40eb22b8-4c66-4dee-a689-e41144bd9f3e-kube-api-access-ps8mt\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970413 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970677 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.970809 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-registration-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.971565 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-auth-proxy-config\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.971578 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9c07f9-d9e4-45b0-8567-49936f18f930-config\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.972201 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-mountpoint-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.972702 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/562fb423-75b3-4716-9370-bc090c7fbbee-config-volume\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.972904 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.973286 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-service-ca-bundle\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.973866 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/403639b0-0067-40f7-bf1c-fcddd8320e62-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 
16:12:36.974147 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-config\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.974512 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-cabundle\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.974565 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-csi-data-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.975886 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/40eb22b8-4c66-4dee-a689-e41144bd9f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.976361 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/42d7dcab-41fb-48a8-a08b-f4fca523e27b-metrics-tls\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.976836 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.977195 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/eeee7a7e-bba7-4704-984d-d18e9813a7ca-signing-key\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.977618 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/76a8915d-4ebe-4679-be00-6b290832b9cb-images\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.978026 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/40eb22b8-4c66-4dee-a689-e41144bd9f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.978238 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/05f40c85-8360-453c-80d9-907bb6db611b-plugins-dir\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.978397 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-default-certificate\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.978618 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-node-bootstrap-token\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.978765 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39ad8798-c58e-4c22-b31c-2eb95b257309-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.979218 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.979330 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-images\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.979345 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:36 crc kubenswrapper[4909]: E1128 16:12:36.979471 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.479459139 +0000 UTC m=+139.876143663 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.980331 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-metrics-certs\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.980423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/403639b0-0067-40f7-bf1c-fcddd8320e62-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.980959 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b7310f1-adae-43c2-ab22-53385dabd116-cert\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.982398 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-stats-auth\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.982640 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebbacd25-74d0-422d-a0b1-51bb64a57468-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: \"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.982798 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d9c07f9-d9e4-45b0-8567-49936f18f930-serving-cert\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.983578 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-srv-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.983726 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/04ec2320-667b-4f35-976e-60c4a1eb3386-profile-collector-cert\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.984154 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/74a58b61-df24-4f27-8382-ff175aa2bb14-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.988220 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7cb9451-0082-4c4b-b90e-7d90581fe2b9-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wpjbf\" (UID: \"d7cb9451-0082-4c4b-b90e-7d90581fe2b9\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.988211 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.998001 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/76a8915d-4ebe-4679-be00-6b290832b9cb-proxy-tls\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:36 crc kubenswrapper[4909]: I1128 16:12:36.998602 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" event={"ID":"5c7e3790-aa06-4b95-94a7-c41d909ec984","Type":"ContainerStarted","Data":"961a6f330f3fce723ddaac963e3a974a7dfb1c053c74b4e0691d17f3b9cc4039"} Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.000778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" event={"ID":"f955b72b-dc83-4219-b488-163acebcf367","Type":"ContainerStarted","Data":"21114e406ae7c4be005b649999728ca8aeefc10cfb3ff96cc8266b1b49bde929"} Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.002134 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" event={"ID":"bc0316f8-a276-4bea-a4cb-bf56c011c64a","Type":"ContainerStarted","Data":"e1800aa7a3597e2f53907015922ceebe0c484180e0cccc655d9cdb2cc7a5cedd"} Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.004440 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khgx9\" (UniqueName: \"kubernetes.io/projected/7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83-kube-api-access-khgx9\") pod \"cluster-samples-operator-665b6dd947-nrl4j\" (UID: \"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.006377 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" event={"ID":"0d41d267-53c8-4859-9c63-737eda42098f","Type":"ContainerStarted","Data":"d2737bcf001f2cf5fc486550257495434306549a23f72ef97baaea42a71d8639"} Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.017929 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c73706cd-a7f9-436c-88a9-0fa3120649aa-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.024914 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.044096 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d229415a-fd7f-4553-9752-c6c401709252-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-dmgdn\" (UID: \"d229415a-fd7f-4553-9752-c6c401709252\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.062409 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfgm8\" (UniqueName: \"kubernetes.io/projected/03717037-d782-4ec7-bc3d-845c4b455107-kube-api-access-qfgm8\") pod \"olm-operator-6b444d44fb-4p52q\" (UID: \"03717037-d782-4ec7-bc3d-845c4b455107\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.071501 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.072042 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.572024826 +0000 UTC m=+139.968709350 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.092964 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvh4n\" (UniqueName: \"kubernetes.io/projected/7cbf4a3d-f0e2-4a50-9122-85fd3f1df269-kube-api-access-lvh4n\") pod \"console-operator-58897d9998-b4wbn\" (UID: \"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269\") " pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.101742 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.104405 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.121869 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlcg8\" (UniqueName: \"kubernetes.io/projected/aacec508-dd9f-438d-bb9e-d408e73f0d05-kube-api-access-dlcg8\") pod \"cluster-image-registry-operator-dc59b4c8b-ddm7r\" (UID: \"aacec508-dd9f-438d-bb9e-d408e73f0d05\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.125800 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.132701 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nlb6g"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.139406 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.146408 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.147966 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84lcp\" (UniqueName: \"kubernetes.io/projected/43c2b623-501b-489b-b5d5-3dbe0d23d265-kube-api-access-84lcp\") pod \"openshift-controller-manager-operator-756b6f6bc6-l5snl\" (UID: \"43c2b623-501b-489b-b5d5-3dbe0d23d265\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.153650 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.153929 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.161809 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.161883 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.163890 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldtbz\" (UniqueName: \"kubernetes.io/projected/e4c200b0-224e-4a58-af74-96a8568ddb3d-kube-api-access-ldtbz\") pod \"downloads-7954f5f757-l2xpj\" (UID: \"e4c200b0-224e-4a58-af74-96a8568ddb3d\") " pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.168021 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.173554 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.174348 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.674330043 +0000 UTC m=+140.071014607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.174955 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.181955 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.205785 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lq6w2\" (UniqueName: \"kubernetes.io/projected/eeee7a7e-bba7-4704-984d-d18e9813a7ca-kube-api-access-lq6w2\") pod \"service-ca-9c57cc56f-7zdl4\" (UID: \"eeee7a7e-bba7-4704-984d-d18e9813a7ca\") " pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.216696 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.230855 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9xdg\" (UniqueName: \"kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg\") pod \"collect-profiles-29405760-qjm9k\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.242046 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwf69\" (UniqueName: \"kubernetes.io/projected/76a8915d-4ebe-4679-be00-6b290832b9cb-kube-api-access-lwf69\") pod \"machine-config-operator-74547568cd-c4k79\" (UID: \"76a8915d-4ebe-4679-be00-6b290832b9cb\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.253249 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.273865 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4gr\" (UniqueName: \"kubernetes.io/projected/05f40c85-8360-453c-80d9-907bb6db611b-kube-api-access-np4gr\") pod \"csi-hostpathplugin-xz6qx\" (UID: \"05f40c85-8360-453c-80d9-907bb6db611b\") " pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.275376 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.275724 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.775690361 +0000 UTC m=+140.172374885 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.275997 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.276486 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 16:12:37.776456493 +0000 UTC m=+140.173141057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.277217 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/562fb423-75b3-4716-9370-bc090c7fbbee-metrics-tls\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.277308 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4262b1b7-b141-48fd-ad22-8827154ba68a-certs\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.284810 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:37 crc kubenswrapper[4909]: W1128 16:12:37.288936 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe88e922_b9a9_4c6c_8b36_5cd5eaf1b7b8.slice/crio-1d1dfd636d6861489810ce5322d1f7650340ac9232fa721f08e3c6621da33a6e WatchSource:0}: Error finding container 1d1dfd636d6861489810ce5322d1f7650340ac9232fa721f08e3c6621da33a6e: Status 404 returned error can't find the container with id 1d1dfd636d6861489810ce5322d1f7650340ac9232fa721f08e3c6621da33a6e Nov 28 16:12:37 crc kubenswrapper[4909]: W1128 16:12:37.291761 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6a2e168_6cb6_43ff_8abd_e16e5b14ea6c.slice/crio-fd65cba3596daa9ac6f3a101570e70d05cde5a6f60fa897e5a1323c306337088 WatchSource:0}: Error finding container fd65cba3596daa9ac6f3a101570e70d05cde5a6f60fa897e5a1323c306337088: Status 404 returned error can't find the container with id fd65cba3596daa9ac6f3a101570e70d05cde5a6f60fa897e5a1323c306337088 Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.293434 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2ntx\" (UniqueName: \"kubernetes.io/projected/3d9c07f9-d9e4-45b0-8567-49936f18f930-kube-api-access-x2ntx\") pod \"service-ca-operator-777779d784-9h22n\" (UID: \"3d9c07f9-d9e4-45b0-8567-49936f18f930\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.297499 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:37 crc kubenswrapper[4909]: W1128 16:12:37.301746 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0120f632_f92f_4cdc_a4c5_0d63471be0ef.slice/crio-2496e82f6188bdd7c760992e10defae28c6ecc0baa22004eefcd9450a626db32 WatchSource:0}: Error finding container 2496e82f6188bdd7c760992e10defae28c6ecc0baa22004eefcd9450a626db32: Status 404 returned error can't find the container with id 2496e82f6188bdd7c760992e10defae28c6ecc0baa22004eefcd9450a626db32 Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.307325 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djb5c\" (UniqueName: \"kubernetes.io/projected/c73706cd-a7f9-436c-88a9-0fa3120649aa-kube-api-access-djb5c\") pod \"package-server-manager-789f6589d5-hstbg\" (UID: \"c73706cd-a7f9-436c-88a9-0fa3120649aa\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.343058 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4qwq\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-kube-api-access-k4qwq\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.353408 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-596z9\" (UniqueName: \"kubernetes.io/projected/ebbacd25-74d0-422d-a0b1-51bb64a57468-kube-api-access-596z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-87jr2\" (UID: \"ebbacd25-74d0-422d-a0b1-51bb64a57468\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.361819 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.371973 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5brq7\" (UniqueName: \"kubernetes.io/projected/39ad8798-c58e-4c22-b31c-2eb95b257309-kube-api-access-5brq7\") pod \"kube-storage-version-migrator-operator-b67b599dd-cml8b\" (UID: \"39ad8798-c58e-4c22-b31c-2eb95b257309\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.378209 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.378399 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.878370909 +0000 UTC m=+140.275055453 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.378498 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.378927 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.878916645 +0000 UTC m=+140.275601179 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.390412 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z54pw\" (UniqueName: \"kubernetes.io/projected/6b7310f1-adae-43c2-ab22-53385dabd116-kube-api-access-z54pw\") pod \"ingress-canary-rwfb8\" (UID: \"6b7310f1-adae-43c2-ab22-53385dabd116\") " pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.410716 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz845\" (UniqueName: \"kubernetes.io/projected/4262b1b7-b141-48fd-ad22-8827154ba68a-kube-api-access-sz845\") pod \"machine-config-server-h9mgh\" (UID: \"4262b1b7-b141-48fd-ad22-8827154ba68a\") " pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.429178 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdph5\" (UniqueName: \"kubernetes.io/projected/a56d38d4-9ee2-46f0-8fc8-3edbc68647d4-kube-api-access-gdph5\") pod \"router-default-5444994796-g6mgs\" (UID: \"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4\") " pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.431151 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.449177 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.464612 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.466602 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps8mt\" (UniqueName: \"kubernetes.io/projected/40eb22b8-4c66-4dee-a689-e41144bd9f3e-kube-api-access-ps8mt\") pod \"machine-config-controller-84d6567774-gpb8r\" (UID: \"40eb22b8-4c66-4dee-a689-e41144bd9f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.471910 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2t7k\" (UniqueName: \"kubernetes.io/projected/53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f-kube-api-access-b2t7k\") pod \"machine-api-operator-5694c8668f-bp2ss\" (UID: \"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.479539 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.479911 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.979896154 +0000 UTC m=+140.376580678 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.493185 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42d7dcab-41fb-48a8-a08b-f4fca523e27b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j4f6w\" (UID: \"42d7dcab-41fb-48a8-a08b-f4fca523e27b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.507930 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.514307 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n58dg\" (UniqueName: \"kubernetes.io/projected/562fb423-75b3-4716-9370-bc090c7fbbee-kube-api-access-n58dg\") pod \"dns-default-jcpqp\" (UID: \"562fb423-75b3-4716-9370-bc090c7fbbee\") " pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.524442 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.530992 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.532208 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbzgq\" (UniqueName: \"kubernetes.io/projected/04ec2320-667b-4f35-976e-60c4a1eb3386-kube-api-access-cbzgq\") pod \"catalog-operator-68c6474976-9d46g\" (UID: \"04ec2320-667b-4f35-976e-60c4a1eb3386\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.545038 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.551135 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn5s7\" (UniqueName: \"kubernetes.io/projected/f7ab1edf-70d0-4894-a7b1-ec68413d9b45-kube-api-access-mn5s7\") pod \"migrator-59844c95c7-4j2wt\" (UID: \"f7ab1edf-70d0-4894-a7b1-ec68413d9b45\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.560330 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.563359 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/403639b0-0067-40f7-bf1c-fcddd8320e62-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qjl8k\" (UID: \"403639b0-0067-40f7-bf1c-fcddd8320e62\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.568766 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.574845 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.576571 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.581489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.581794 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.081779628 +0000 UTC m=+140.478464142 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.587208 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfztf\" (UniqueName: \"kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf\") pod \"marketplace-operator-79b997595-pnjkw\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.599533 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.606624 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.606910 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d5l8\" (UniqueName: \"kubernetes.io/projected/74a58b61-df24-4f27-8382-ff175aa2bb14-kube-api-access-4d5l8\") pod \"multus-admission-controller-857f4d67dd-qbnrs\" (UID: \"74a58b61-df24-4f27-8382-ff175aa2bb14\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.612339 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.620166 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.626897 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.637519 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rwfb8" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.645776 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7zdl4"] Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.667918 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-h9mgh" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.682876 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.683096 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.183071316 +0000 UTC m=+140.579755840 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.683953 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.684280 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.184273451 +0000 UTC m=+140.580957975 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.735332 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw"] Nov 28 16:12:37 crc kubenswrapper[4909]: W1128 16:12:37.755940 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeee7a7e_bba7_4704_984d_d18e9813a7ca.slice/crio-a7c4503b503373230e11a96a1bb038872a3a097b7628526cd48cec63ad794835 WatchSource:0}: Error finding container a7c4503b503373230e11a96a1bb038872a3a097b7628526cd48cec63ad794835: Status 404 returned error can't find the container with id a7c4503b503373230e11a96a1bb038872a3a097b7628526cd48cec63ad794835 Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.785132 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.785292 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.28526137 +0000 UTC m=+140.681945894 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.785506 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.785829 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.285819946 +0000 UTC m=+140.682504470 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.846256 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.887899 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.888263 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.388248247 +0000 UTC m=+140.784932761 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.891223 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" Nov 28 16:12:37 crc kubenswrapper[4909]: I1128 16:12:37.989057 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:37 crc kubenswrapper[4909]: E1128 16:12:37.989460 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.489443632 +0000 UTC m=+140.886128156 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.012292 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" event={"ID":"aeb1d3c3-35d9-4d08-8026-9faa832a32f0","Type":"ContainerStarted","Data":"8251a3f8ee831d115969ea21f179ae37932f413d28d30630a0094ddca18e3b4f"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.013710 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" event={"ID":"eeee7a7e-bba7-4704-984d-d18e9813a7ca","Type":"ContainerStarted","Data":"a7c4503b503373230e11a96a1bb038872a3a097b7628526cd48cec63ad794835"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.014337 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" event={"ID":"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c","Type":"ContainerStarted","Data":"fd65cba3596daa9ac6f3a101570e70d05cde5a6f60fa897e5a1323c306337088"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.023140 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" event={"ID":"b6bb0f55-f1dd-4a71-b917-5f9b7209478e","Type":"ContainerStarted","Data":"dae581a768c53bc650a182c8218e7dfa3cc29c1208fe994e3274cbc5de155354"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.026336 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qjf9t" event={"ID":"06224dc2-1e32-47b8-8a11-0c90a61084cf","Type":"ContainerStarted","Data":"a8b7b0a2f7e774e4668fd5c20c312857d0ba0aa96585f98894ee6a9a1df46cf5"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.059395 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" event={"ID":"0120f632-f92f-4cdc-a4c5-0d63471be0ef","Type":"ContainerStarted","Data":"2496e82f6188bdd7c760992e10defae28c6ecc0baa22004eefcd9450a626db32"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.064884 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" event={"ID":"f15e54ab-209a-490e-a93a-4c86fe1b6b2b","Type":"ContainerStarted","Data":"a6a3987dc3497e51cd64ebbc82e499fb24677617caa7698fd299999a676a4a62"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.071149 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" event={"ID":"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8","Type":"ContainerStarted","Data":"1d1dfd636d6861489810ce5322d1f7650340ac9232fa721f08e3c6621da33a6e"} Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.076117 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" event={"ID":"76a8915d-4ebe-4679-be00-6b290832b9cb","Type":"ContainerStarted","Data":"3620d14ab92a0c2ac761f126721c2719ce594107f285d6426e671cfa631fa8d9"} Nov 28 16:12:38 crc 
kubenswrapper[4909]: I1128 16:12:38.089522 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.089718 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.58969022 +0000 UTC m=+140.986374744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.089848 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.090123 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.590115772 +0000 UTC m=+140.986800296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.194459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.194722 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.694641443 +0000 UTC m=+141.091325977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.194832 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.195154 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.695143967 +0000 UTC m=+141.091828491 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.210616 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.223336 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.227438 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mdb95"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.230826 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.297013 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.297162 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.797137865 +0000 UTC m=+141.193822389 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.297285 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.297601 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.797593859 +0000 UTC m=+141.194278383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.299214 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r"] Nov 28 16:12:38 crc kubenswrapper[4909]: W1128 16:12:38.376241 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40eb22b8_4c66_4dee_a689_e41144bd9f3e.slice/crio-51b8172dea67235066d8776d32e3b5bcf3d9a39947236848dcd10c29de6f951f WatchSource:0}: Error finding container 51b8172dea67235066d8776d32e3b5bcf3d9a39947236848dcd10c29de6f951f: Status 404 returned error can't find the container with id 51b8172dea67235066d8776d32e3b5bcf3d9a39947236848dcd10c29de6f951f Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.397974 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.398370 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.898355101 +0000 UTC m=+141.295039625 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.417232 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.426568 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xz6qx"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.440246 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b4wbn"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.445387 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.470329 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7xr8n"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.481431 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.503672 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.510383 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.010361897 +0000 UTC m=+141.407046421 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.521215 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jcpqp"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.540720 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.566277 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.619393 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.619893 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.119877602 +0000 UTC m=+141.516562126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.720503 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.720912 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.220896352 +0000 UTC m=+141.617580866 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.811191 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.824903 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.825337 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.32531232 +0000 UTC m=+141.721996904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.836893 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9h22n"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.839726 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qbnrs"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.842287 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bp2ss"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.929376 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:38 crc kubenswrapper[4909]: E1128 16:12:38.930215 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.430202531 +0000 UTC m=+141.826887055 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.967482 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.967597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.970451 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:12:38 crc kubenswrapper[4909]: I1128 16:12:38.973481 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-l2xpj"] Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.030514 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.030786 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.530750098 +0000 UTC m=+141.927434632 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.084111 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" event={"ID":"76a8915d-4ebe-4679-be00-6b290832b9cb","Type":"ContainerStarted","Data":"210c2756b9fdc88ae09f01abbed5d5b86287a2d461fc7de228a732cadaf0791c"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.085523 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-g6mgs" event={"ID":"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4","Type":"ContainerStarted","Data":"51512d28aaee5054b9a47d6c73f2b5cbd92fac7924ffb708d81e63a0d60cad11"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.086917 4909 generic.go:334] "Generic (PLEG): container finished" podID="0120f632-f92f-4cdc-a4c5-0d63471be0ef" containerID="706677fd25e7924c13ad86527e6a6daaf632b46778c1f054a37d930c92689d5e" exitCode=0 Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.087009 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" event={"ID":"0120f632-f92f-4cdc-a4c5-0d63471be0ef","Type":"ContainerDied","Data":"706677fd25e7924c13ad86527e6a6daaf632b46778c1f054a37d930c92689d5e"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.088507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" event={"ID":"aeb1d3c3-35d9-4d08-8026-9faa832a32f0","Type":"ContainerStarted","Data":"caa964ef0b1beee5b56ad36e57b56edb3740727ec8770e638e7f81f32d7e77f4"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.091030 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" event={"ID":"04ec2320-667b-4f35-976e-60c4a1eb3386","Type":"ContainerStarted","Data":"d2e6a13aaf160a4f7005d453e1768498aab6aa148fe3358ce7f6662fb9054f6c"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.092207 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" event={"ID":"0731ea3e-3015-4311-88f4-bcd1f9f8204e","Type":"ContainerStarted","Data":"b554c96afb7772b34e8637a199929dc98b13cf0c5ffae266ddd9cc4c93b88898"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.093337 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" event={"ID":"42d7dcab-41fb-48a8-a08b-f4fca523e27b","Type":"ContainerStarted","Data":"036795f06f3faad168696c8a7bfe6d3e71725626af93fe564c33e5b1c3cc80d8"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.096562 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" event={"ID":"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8","Type":"ContainerStarted","Data":"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.098421 4909 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" event={"ID":"c6a2e168-6cb6-43ff-8abd-e16e5b14ea6c","Type":"ContainerStarted","Data":"5560495d63d19fb9fa9087d8c4ed2772ce6f2d1ac03596d22e5aebbf43453114"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.099954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qjf9t" event={"ID":"06224dc2-1e32-47b8-8a11-0c90a61084cf","Type":"ContainerStarted","Data":"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.100978 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" event={"ID":"d229415a-fd7f-4553-9752-c6c401709252","Type":"ContainerStarted","Data":"b5c433a461bd97eed4ac29d3b489fa74df0b578ff301fdd45d13d2e0969bc695"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.102558 4909 generic.go:334] "Generic (PLEG): container finished" podID="5c7e3790-aa06-4b95-94a7-c41d909ec984" containerID="42af31cf6c91e06feeb28e8183da0a995e6154a6b347b746847e799bfd0dea55" exitCode=0 Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.102603 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" event={"ID":"5c7e3790-aa06-4b95-94a7-c41d909ec984","Type":"ContainerDied","Data":"42af31cf6c91e06feeb28e8183da0a995e6154a6b347b746847e799bfd0dea55"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.104015 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" event={"ID":"aacec508-dd9f-438d-bb9e-d408e73f0d05","Type":"ContainerStarted","Data":"e8290d7eead2ab0d1dcf4b196a3dabdf950ea6ac30799714a127b7e9070bee8c"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.105424 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" event={"ID":"f15e54ab-209a-490e-a93a-4c86fe1b6b2b","Type":"ContainerStarted","Data":"5a6c145f13f7e695bd2d0358afea979bff194b4a7a0cc8e1e3f043ba1405ff4e"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.105624 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.106879 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" event={"ID":"03717037-d782-4ec7-bc3d-845c4b455107","Type":"ContainerStarted","Data":"c38fe19f6c3880468d98d7b3f1124af93e83fe8fa12b5742fa5c122d2877d950"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.108234 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" event={"ID":"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f","Type":"ContainerStarted","Data":"2117070eaeae71a2c5356fea5ecc07e2d8a52eeef63641c6467b90767264c508"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.108324 4909 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-x4wv5 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:5443/healthz\": dial tcp 10.217.0.16:5443: connect: connection refused" start-of-body= Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.108425 4909 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" podUID="f15e54ab-209a-490e-a93a-4c86fe1b6b2b" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.16:5443/healthz\": dial tcp 10.217.0.16:5443: connect: connection refused" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.110896 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" event={"ID":"3d9c07f9-d9e4-45b0-8567-49936f18f930","Type":"ContainerStarted","Data":"f85791dec3aacaf90491e8e2328313c176deea52192713690b786662c92f8eb3"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.112749 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" event={"ID":"05f40c85-8360-453c-80d9-907bb6db611b","Type":"ContainerStarted","Data":"4c66059a777ad164a582dc6169c608406de98bcc1001a6f21c4ade884371c4d2"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.114098 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-h9mgh" event={"ID":"4262b1b7-b141-48fd-ad22-8827154ba68a","Type":"ContainerStarted","Data":"40bcae1c1a186a2da741420d18ab6a021d93d396dd4d8e84939378eea35e06ad"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.115536 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" event={"ID":"40eb22b8-4c66-4dee-a689-e41144bd9f3e","Type":"ContainerStarted","Data":"51b8172dea67235066d8776d32e3b5bcf3d9a39947236848dcd10c29de6f951f"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.116602 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" event={"ID":"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269","Type":"ContainerStarted","Data":"bec28993eb4a180cf7f8af29625879584777c6d051ef2edadda82bb3f4d4e095"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.117570 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" event={"ID":"74a58b61-df24-4f27-8382-ff175aa2bb14","Type":"ContainerStarted","Data":"893823ff4a2cd84c25f69dad797a0cac47da1fb505d874e50c54a837476290e6"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.119024 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcpqp" event={"ID":"562fb423-75b3-4716-9370-bc090c7fbbee","Type":"ContainerStarted","Data":"47a0792a88bae417c8fdc0d5a3d2992f257c1ad1b2171acc94505f9853d6ec24"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.120054 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" event={"ID":"355a0053-b22d-4e37-9ad1-ecfbdf391bc2","Type":"ContainerStarted","Data":"04cb77b656efec8bbea0cc54382e3a8363e660fdb6664c62ecc480da68530695"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.121497 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" event={"ID":"0d41d267-53c8-4859-9c63-737eda42098f","Type":"ContainerStarted","Data":"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.122047 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.122951 
4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" event={"ID":"ba89323c-1e8c-4731-85d4-8b07766e3609","Type":"ContainerStarted","Data":"5306c6fc63b3c04d97a82b87bf9d2845e50abb83d4c25bb7bff29b6ddbbba182"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.123231 4909 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5x9zh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.123290 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.124747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" event={"ID":"bc0316f8-a276-4bea-a4cb-bf56c011c64a","Type":"ContainerStarted","Data":"cd3a1f6752ef3375c643e1c150d367d924dbf80e772167f8f59992411baf517a"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.125126 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.126280 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" event={"ID":"d7cb9451-0082-4c4b-b90e-7d90581fe2b9","Type":"ContainerStarted","Data":"402a84c87d57933f2e9aa2c5ca6fd59b2a979ea3d6d57715d4c706fd96b4a8f0"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.126765 4909 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-t67vb container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" start-of-body= Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.126816 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.127526 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" event={"ID":"ebbacd25-74d0-422d-a0b1-51bb64a57468","Type":"ContainerStarted","Data":"795e76803fa54a0ff2da12269775346a152a2af4712d8ec0b2d808b598498eee"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.129230 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" event={"ID":"f955b72b-dc83-4219-b488-163acebcf367","Type":"ContainerStarted","Data":"0ed0c8d8e707e57515cf3d66edf2cda72bcfc723713f23377106295ef760d409"} Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.132492 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.133236 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.633211449 +0000 UTC m=+142.029896013 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.229881 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-nlb6g" podStartSLOduration=122.229860333 podStartE2EDuration="2m2.229860333s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:39.227265528 +0000 UTC m=+141.623950062" watchObservedRunningTime="2025-11-28 16:12:39.229860333 +0000 UTC m=+141.626544877" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.233190 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.234450 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.734417044 +0000 UTC m=+142.131101588 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.302907 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" podStartSLOduration=121.302889117 podStartE2EDuration="2m1.302889117s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:39.282699805 +0000 UTC m=+141.679384359" watchObservedRunningTime="2025-11-28 16:12:39.302889117 +0000 UTC m=+141.699573641" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.303873 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" podStartSLOduration=122.303866635 podStartE2EDuration="2m2.303866635s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:39.301316151 +0000 UTC m=+141.698000695" watchObservedRunningTime="2025-11-28 16:12:39.303866635 +0000 UTC m=+141.700551159" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.335279 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.335625 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.835611429 +0000 UTC m=+142.232295973 (durationBeforeRetry 500ms). 
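The pod_startup_latency_tracker records are plain arithmetic: with both pull timestamps at the zero time (no image pull observed), podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp. A check against the authentication-operator record above, with the values copied from the log:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Values copied from the authentication-operator record above.
        created, _ := time.Parse(time.RFC3339Nano, "2025-11-28T16:10:37Z")
        watched, _ := time.Parse(time.RFC3339Nano, "2025-11-28T16:12:39.229860333Z")

        // firstStartedPulling/lastFinishedPulling are the zero time, i.e. no
        // image pull was observed, so no pull window is subtracted.
        fmt.Println(watched.Sub(created)) // 2m2.229860333s == podStartE2EDuration
    }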
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.351485 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-dvcwf" podStartSLOduration=122.351471136 podStartE2EDuration="2m2.351471136s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:39.350185499 +0000 UTC m=+141.746870043" watchObservedRunningTime="2025-11-28 16:12:39.351471136 +0000 UTC m=+141.748155680" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.391531 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" podStartSLOduration=122.391501689 podStartE2EDuration="2m2.391501689s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:39.387765372 +0000 UTC m=+141.784449906" watchObservedRunningTime="2025-11-28 16:12:39.391501689 +0000 UTC m=+141.788186253" Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.435831 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.436070 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.936034922 +0000 UTC m=+142.332719486 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.436143 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.436629 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.936612149 +0000 UTC m=+142.333296703 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.537477 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.537631 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.037607658 +0000 UTC m=+142.434292182 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.537849 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.538325 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.038303768 +0000 UTC m=+142.434988332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.638833 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.639072 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.13903962 +0000 UTC m=+142.535724184 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.639360 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.639855 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.139834503 +0000 UTC m=+142.536519057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.740414 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.740714 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.240631416 +0000 UTC m=+142.637315980 (durationBeforeRetry 500ms). 
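The half-second cadence of these mount/unmount records comes from the durationBeforeRetry printed in each failure: after an operation for a volume fails, the kubelet's nested pending-operations bookkeeping refuses to start another operation for the same volume until the back-off expires, and the reconciler's next pass retries it. A toy model of that gate (a simplified sketch, not the kubelet's actual nestedpendingoperations code):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // retryGate mimics the "No retries permitted until <t>" behaviour above:
    // after a failure, an operation key is frozen for a back-off window.
    type retryGate struct {
        mu    sync.Mutex
        until map[string]time.Time
    }

    func newRetryGate() *retryGate { return &retryGate{until: map[string]time.Time{}} }

    // tryRun runs op unless key is still inside its back-off window; on
    // failure it arms the window (the log's durationBeforeRetry).
    func (g *retryGate) tryRun(key string, backoff time.Duration, op func() error) {
        g.mu.Lock()
        if t, ok := g.until[key]; ok && time.Now().Before(t) {
            g.mu.Unlock()
            fmt.Printf("%s: no retries permitted until %s\n", key, t.Format(time.RFC3339Nano))
            return
        }
        g.mu.Unlock()

        if err := op(); err != nil {
            g.mu.Lock()
            g.until[key] = time.Now().Add(backoff)
            g.mu.Unlock()
            fmt.Printf("%s failed: %v\n", key, err)
        }
    }

    func main() {
        g := newRetryGate()
        mount := func() error {
            return fmt.Errorf("driver name kubevirt.io.hostpath-provisioner not found")
        }
        // Reconciler passes arrive faster than the back-off, so intermediate
        // attempts are rejected, as in the log.
        for i := 0; i < 3; i++ {
            g.tryRun("pvc-657094db", 500*time.Millisecond, mount)
            time.Sleep(200 * time.Millisecond)
        }
    }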
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.848791 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.849611 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.349582585 +0000 UTC m=+142.746267149 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.895461 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b"] Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.896597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl"] Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.946844 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"] Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.946892 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rwfb8"] Nov 28 16:12:39 crc kubenswrapper[4909]: I1128 16:12:39.951838 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4909]: E1128 16:12:39.953815 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.453792167 +0000 UTC m=+142.850476691 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4909]: W1128 16:12:39.974769 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b7310f1_adae_43c2_ab22_53385dabd116.slice/crio-ef021173152e210088672afd2b2223f500ae305a0b36155bd1e737d42524ca83 WatchSource:0}: Error finding container ef021173152e210088672afd2b2223f500ae305a0b36155bd1e737d42524ca83: Status 404 returned error can't find the container with id ef021173152e210088672afd2b2223f500ae305a0b36155bd1e737d42524ca83 Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.054732 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.055333 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.555316371 +0000 UTC m=+142.952000905 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.135722 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" event={"ID":"c73706cd-a7f9-436c-88a9-0fa3120649aa","Type":"ContainerStarted","Data":"fbedd9ddc609bc02e5c51e09d90edb72b5ff0b912ecdfe822c0b3de5e56f190d"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.137459 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" event={"ID":"f7ab1edf-70d0-4894-a7b1-ec68413d9b45","Type":"ContainerStarted","Data":"8d1fbf34ad30303c874bfa2ac4d32852a04ffcfb21f2102662fa6ec5d9a8bf1c"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.138573 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" event={"ID":"403639b0-0067-40f7-bf1c-fcddd8320e62","Type":"ContainerStarted","Data":"2afcbf938dbcb32e3c36dc7ebe95c4f88b9e281aaefaf11b63374468419ef111"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.141065 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" event={"ID":"43c2b623-501b-489b-b5d5-3dbe0d23d265","Type":"ContainerStarted","Data":"1520c22d947bc991acb15c8b7930982cf019638027d4ea5509dccf4df5cf581b"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.142042 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" event={"ID":"39ad8798-c58e-4c22-b31c-2eb95b257309","Type":"ContainerStarted","Data":"95578864523f8938c97720c7adbc803a63f19525ed05f5669cbc611001a3f10d"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.143247 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rwfb8" event={"ID":"6b7310f1-adae-43c2-ab22-53385dabd116","Type":"ContainerStarted","Data":"ef021173152e210088672afd2b2223f500ae305a0b36155bd1e737d42524ca83"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.144388 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerStarted","Data":"ae8083dc8b11ea8b7f8929a28abf86dd38a2824f428eb5573589142355c138bc"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.145914 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-l2xpj" event={"ID":"e4c200b0-224e-4a58-af74-96a8568ddb3d","Type":"ContainerStarted","Data":"4d1dec2ca6432ea00343bd9f5a249050f550f605b8621bb0f0d6abf48a31f3af"} Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.146799 4909 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-x4wv5 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:5443/healthz\": dial tcp 10.217.0.16:5443: 
connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.147890 4909 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5x9zh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.147942 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.147890 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" podUID="f15e54ab-209a-490e-a93a-4c86fe1b6b2b" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.16:5443/healthz\": dial tcp 10.217.0.16:5443: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.147129 4909 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-t67vb container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.148054 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.155998 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.157281 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.657250137 +0000 UTC m=+143.053934731 (durationBeforeRetry 500ms). 
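The prober entries interleaved here all report the same failure mode: the kubelet dials the container's HTTPS /healthz (or /readyz) endpoint and the TCP connect is refused because the process inside has not started listening yet; once it does, the later "SyncLoop (probe)" ready transitions appear. A rough stand-in for one such check (a sketch; kubelet HTTPS probes do not verify the serving certificate, and the URL is one of the endpoints from the log):

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // probeOnce performs a single HTTPS readiness check, approximately what
    // the kubelet prober does for the /healthz endpoints in this log.
    func probeOnce(url string) error {
        client := &http.Client{
            Timeout: 1 * time.Second,
            Transport: &http.Transport{
                // HTTPS probes skip certificate verification.
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "connect: connection refused" while the server is starting
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unhealthy: %s", resp.Status)
        }
        return nil
    }

    func main() {
        if err := probeOnce("https://10.217.0.6:8443/healthz"); err != nil {
            fmt.Println("Probe failed:", err)
        } else {
            fmt.Println("ready")
        }
    }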
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.257772 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.258224 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.758196555 +0000 UTC m=+143.154881119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.358980 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.359361 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.859346299 +0000 UTC m=+143.256030823 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.460215 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.460574 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.960562884 +0000 UTC m=+143.357247408 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.560729 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.560982 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.060958746 +0000 UTC m=+143.457643270 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.561306 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.562632 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.062611354 +0000 UTC m=+143.459295918 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.662585 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.662797 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.162768279 +0000 UTC m=+143.559452843 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.662886 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.663190 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.163182051 +0000 UTC m=+143.559866575 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.764274 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.764514 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.2645004 +0000 UTC m=+143.661184924 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.866056 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.871026 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.371009147 +0000 UTC m=+143.767693671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4909]: I1128 16:12:40.975577 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4909]: E1128 16:12:40.975945 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.475932679 +0000 UTC m=+143.872617203 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.076729 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.077070 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.577042542 +0000 UTC m=+143.973727056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.178254 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.178970 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.678952567 +0000 UTC m=+144.075637101 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.199199 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" event={"ID":"40eb22b8-4c66-4dee-a689-e41144bd9f3e","Type":"ContainerStarted","Data":"df19d0b2ea0a64f867214982240cb7162dadfaf4bfd396dda608237b86b0f112"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.220747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" event={"ID":"0731ea3e-3015-4311-88f4-bcd1f9f8204e","Type":"ContainerStarted","Data":"55e25c87ad488a0ee230737508f73167846e286147ac4f3da75ce9603ff85aad"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.224745 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" event={"ID":"eeee7a7e-bba7-4704-984d-d18e9813a7ca","Type":"ContainerStarted","Data":"732170f01c94900ac82b3b3b687ac1f550df3323816d5a3fb4cd2cbcd572e968"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.239409 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" event={"ID":"ebbacd25-74d0-422d-a0b1-51bb64a57468","Type":"ContainerStarted","Data":"e178674799e5305b0a40b4924ac769ee4cb5cc75b5394944f57a4507be99b036"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.253581 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" event={"ID":"03717037-d782-4ec7-bc3d-845c4b455107","Type":"ContainerStarted","Data":"271f5b1b72bac2e401c03923218a023d8ae17ac06df64006f50d334959a616c9"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.253987 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.255238 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" podStartSLOduration=124.255227705 podStartE2EDuration="2m4.255227705s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.239148791 +0000 UTC m=+143.635833315" watchObservedRunningTime="2025-11-28 16:12:41.255227705 +0000 UTC m=+143.651912219" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.257817 4909 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4p52q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.257862 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" 
podUID="03717037-d782-4ec7-bc3d-845c4b455107" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.259521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" event={"ID":"aacec508-dd9f-438d-bb9e-d408e73f0d05","Type":"ContainerStarted","Data":"7a5bfedb53ab7031cd474674bab186a0df4933e30bc2696804f6bd2c5e152dc8"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.277183 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-7zdl4" podStartSLOduration=123.277163216 podStartE2EDuration="2m3.277163216s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.261878676 +0000 UTC m=+143.658563210" watchObservedRunningTime="2025-11-28 16:12:41.277163216 +0000 UTC m=+143.673847740" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.277560 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-87jr2" podStartSLOduration=123.277555388 podStartE2EDuration="2m3.277555388s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.27348237 +0000 UTC m=+143.670166894" watchObservedRunningTime="2025-11-28 16:12:41.277555388 +0000 UTC m=+143.674239912" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.281488 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.281766 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.781755349 +0000 UTC m=+144.178439873 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.303019 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q" podStartSLOduration=123.303003021 podStartE2EDuration="2m3.303003021s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.301004323 +0000 UTC m=+143.697688847" watchObservedRunningTime="2025-11-28 16:12:41.303003021 +0000 UTC m=+143.699687545" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.303714 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" event={"ID":"355a0053-b22d-4e37-9ad1-ecfbdf391bc2","Type":"ContainerStarted","Data":"1008712aa38a099b0a000e017f19b34377d9bcbd127ba4d2b48beea7a32e0282"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.309573 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" event={"ID":"3d9c07f9-d9e4-45b0-8567-49936f18f930","Type":"ContainerStarted","Data":"0b8f4ff7a0f146e89f36e7b1bbd50bf9eab2f540f89a30ef4fb0d07dffa4d1d7"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.328422 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-h9mgh" event={"ID":"4262b1b7-b141-48fd-ad22-8827154ba68a","Type":"ContainerStarted","Data":"fb04e77cc31f1688dbb91f28477415a68189f6412ed87b11af29a5419788369b"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.335628 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddm7r" podStartSLOduration=124.33560628 podStartE2EDuration="2m4.33560628s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.334163128 +0000 UTC m=+143.730847652" watchObservedRunningTime="2025-11-28 16:12:41.33560628 +0000 UTC m=+143.732290804" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.348438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" event={"ID":"76a8915d-4ebe-4679-be00-6b290832b9cb","Type":"ContainerStarted","Data":"7ce50c3049bcc5407c8725b48c58b92812965b692fe846c5515260a5d40169f9"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.382905 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.383683 4909 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:41.883668164 +0000 UTC m=+144.280352688 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.425771 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-h9mgh" podStartSLOduration=7.425754037 podStartE2EDuration="7.425754037s" podCreationTimestamp="2025-11-28 16:12:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.367397966 +0000 UTC m=+143.764082490" watchObservedRunningTime="2025-11-28 16:12:41.425754037 +0000 UTC m=+143.822438561" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.426563 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mdb95" podStartSLOduration=124.42655855 podStartE2EDuration="2m4.42655855s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.425245132 +0000 UTC m=+143.821929656" watchObservedRunningTime="2025-11-28 16:12:41.42655855 +0000 UTC m=+143.823243074" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.453647 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" event={"ID":"ba89323c-1e8c-4731-85d4-8b07766e3609","Type":"ContainerStarted","Data":"39361b0a0565eaaafdaa8577e4a9d64aa138c3f4a79c40628c688c51becc0ed6"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.472806 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" event={"ID":"7cbf4a3d-f0e2-4a50-9122-85fd3f1df269","Type":"ContainerStarted","Data":"0dc3165bed73f998330903e65e3b8e85532d85a4a63d2debcfaa6af91c270b69"} Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.473436 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.484682 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.484958 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 16:12:41.984946302 +0000 UTC m=+144.381630826 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.517969 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" event={"ID":"04ec2320-667b-4f35-976e-60c4a1eb3386","Type":"ContainerStarted","Data":"e8e63ae2ccc6e8dcf37b7e3a58d70ac4ce2b8cf20f05f9fb4740bdae8c0bb607"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.519126 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.533830 4909 patch_prober.go:28] interesting pod/console-operator-58897d9998-b4wbn container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.533891 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" podUID="7cbf4a3d-f0e2-4a50-9122-85fd3f1df269" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.549527 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-c4k79" podStartSLOduration=123.549511242 podStartE2EDuration="2m3.549511242s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.547830503 +0000 UTC m=+143.944515027" watchObservedRunningTime="2025-11-28 16:12:41.549511242 +0000 UTC m=+143.946195766"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.550264 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9h22n" podStartSLOduration=123.550258213 podStartE2EDuration="2m3.550258213s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.472992128 +0000 UTC m=+143.869676652" watchObservedRunningTime="2025-11-28 16:12:41.550258213 +0000 UTC m=+143.946942727"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.566051 4909 generic.go:334] "Generic (PLEG): container finished" podID="b6bb0f55-f1dd-4a71-b917-5f9b7209478e" containerID="33b0467f1a437151f1509b91b8dff34e020fb67396df349e4c11ae868eab594c" exitCode=0
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.566145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" event={"ID":"b6bb0f55-f1dd-4a71-b917-5f9b7209478e","Type":"ContainerDied","Data":"33b0467f1a437151f1509b91b8dff34e020fb67396df349e4c11ae868eab594c"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.580647 4909 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-9d46g container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body=
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.580715 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" podUID="04ec2320-667b-4f35-976e-60c4a1eb3386" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.587161 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.588474 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.088448643 +0000 UTC m=+144.485133167 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.620875 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" event={"ID":"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83","Type":"ContainerStarted","Data":"5d8354ccd1241f77ad158c3e1a51404d8976bf7f93362453c075387c704aca4f"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.622726 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-b4wbn" podStartSLOduration=124.62270949 podStartE2EDuration="2m4.62270949s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.596856156 +0000 UTC m=+143.993540680" watchObservedRunningTime="2025-11-28 16:12:41.62270949 +0000 UTC m=+144.019394014"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.623267 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g" podStartSLOduration=123.623261466 podStartE2EDuration="2m3.623261466s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.622323489 +0000 UTC m=+144.019008013" watchObservedRunningTime="2025-11-28 16:12:41.623261466 +0000 UTC m=+144.019945990"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.671451 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" event={"ID":"d7cb9451-0082-4c4b-b90e-7d90581fe2b9","Type":"ContainerStarted","Data":"4454f76b04e9f855aa8f4d6828a2823031542ca69dd39cb2c121a77339b8294c"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.688372 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.689227 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.189216806 +0000 UTC m=+144.585901330 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.710858 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" event={"ID":"42d7dcab-41fb-48a8-a08b-f4fca523e27b","Type":"ContainerStarted","Data":"5821acddcaf6da17b6c16956fe0851467b4315c062dfc28d8ad05325b8d0efa2"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.741062 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-g6mgs" event={"ID":"a56d38d4-9ee2-46f0-8fc8-3edbc68647d4","Type":"ContainerStarted","Data":"15faff93cd20bed13c5fe3cd512871a9c3645ac154427817012e587473049ad3"}
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.742260 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.789903 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.791032 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.291015748 +0000 UTC m=+144.687700262 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.794020 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.794603 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wpjbf" podStartSLOduration=124.794588491 podStartE2EDuration="2m4.794588491s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.72303906 +0000 UTC m=+144.119723584" watchObservedRunningTime="2025-11-28 16:12:41.794588491 +0000 UTC m=+144.191273015"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.834946 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" podStartSLOduration=123.834927593 podStartE2EDuration="2m3.834927593s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.834029617 +0000 UTC m=+144.230714131" watchObservedRunningTime="2025-11-28 16:12:41.834927593 +0000 UTC m=+144.231612117"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.835844 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-qjf9t" podStartSLOduration=124.83583885 podStartE2EDuration="2m4.83583885s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.794251772 +0000 UTC m=+144.190936296" watchObservedRunningTime="2025-11-28 16:12:41.83583885 +0000 UTC m=+144.232523374"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.891202 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.897762 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.397748033 +0000 UTC m=+144.794432637 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.933412 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-g6mgs" podStartSLOduration=124.93339821000001 podStartE2EDuration="2m4.93339821s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:41.933227295 +0000 UTC m=+144.329911819" watchObservedRunningTime="2025-11-28 16:12:41.93339821 +0000 UTC m=+144.330082734"
Nov 28 16:12:41 crc kubenswrapper[4909]: I1128 16:12:41.994013 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:41 crc kubenswrapper[4909]: E1128 16:12:41.994367 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.494353266 +0000 UTC m=+144.891037790 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.095598 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.095893 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.59588178 +0000 UTC m=+144.992566304 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.198100 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.198462 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.698446275 +0000 UTC m=+145.095130799 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.304145 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.304436 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.804425768 +0000 UTC m=+145.201110292 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.404673 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.405034 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:42.905020815 +0000 UTC m=+145.301705339 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.516670 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.516909 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.016899528 +0000 UTC m=+145.413584052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.578986 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-g6mgs"
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.590889 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:42 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld
Nov 28 16:12:42 crc kubenswrapper[4909]: [+]process-running ok
Nov 28 16:12:42 crc kubenswrapper[4909]: healthz check failed
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.590952 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.618217 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.618613 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.118598428 +0000 UTC m=+145.515282952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.721644 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.722429 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.222414568 +0000 UTC m=+145.619099092 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.798928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rwfb8" event={"ID":"6b7310f1-adae-43c2-ab22-53385dabd116","Type":"ContainerStarted","Data":"ad9c279c7f5ed09d5b12f7c9f3f8133030b90e53c31a820be984e457396ee5f3"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.802468 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" event={"ID":"b6bb0f55-f1dd-4a71-b917-5f9b7209478e","Type":"ContainerStarted","Data":"9ca2c71e5e701cf8cd075857dcea7fbdc8463f5cf813d40f5540068243a73c70"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.802813 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw"
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.809424 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" event={"ID":"39ad8798-c58e-4c22-b31c-2eb95b257309","Type":"ContainerStarted","Data":"7f99a15d58bc9ac6383293c3e3222272deb7c08046c4f56866924177b0ad0c26"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.811320 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" event={"ID":"40eb22b8-4c66-4dee-a689-e41144bd9f3e","Type":"ContainerStarted","Data":"de4ebca5ded1c57abadf8010fca0eb70234106d9be001c0ab976f2596c63f462"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.818713 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" event={"ID":"5c7e3790-aa06-4b95-94a7-c41d909ec984","Type":"ContainerStarted","Data":"e1ada51e7e12421de25e44688902e5dc64d1d0bebabce0ca6f90953b1cedb31f"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.828170 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.828517 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" event={"ID":"403639b0-0067-40f7-bf1c-fcddd8320e62","Type":"ContainerStarted","Data":"e0cce2871e4711ac0c367eeab7b212ce126bcadccddabf3828e17337edc16e8f"}
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.828543 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.328527785 +0000 UTC m=+145.725212309 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.834052 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" event={"ID":"d229415a-fd7f-4553-9752-c6c401709252","Type":"ContainerStarted","Data":"b7263ba1ed87112c4347ab93d7e05e05989ca80630ba35aa7ece619fed05c305"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.839048 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" event={"ID":"43c2b623-501b-489b-b5d5-3dbe0d23d265","Type":"ContainerStarted","Data":"abbb042187b7dfc7564f73de22e9d6ead872f6eb7979e0cef57b34763333f63e"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.858303 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" event={"ID":"42d7dcab-41fb-48a8-a08b-f4fca523e27b","Type":"ContainerStarted","Data":"f18d16b5a66805300d4a2aaa496f7c7540aacc34b24845ba98de273fc1ad04c5"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.883500 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" event={"ID":"f955b72b-dc83-4219-b488-163acebcf367","Type":"ContainerStarted","Data":"79f4f1b70739d5a31c02e9b5923ce7884338b967a48059b5a4e49849dba379cb"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.931246 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:42 crc kubenswrapper[4909]: E1128 16:12:42.934345 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.434332383 +0000 UTC m=+145.831016907 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.949568 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" event={"ID":"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f","Type":"ContainerStarted","Data":"06f7722f738b374fbfae1e6c70a43df80fe8f74c5e69f28f8f2e7219337aec51"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.949610 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" event={"ID":"53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f","Type":"ContainerStarted","Data":"dda54e82835112e1ddf742b31c1f10e3eb587361e61d48b0bfb5641b98be998c"}
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.984925 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw" podStartSLOduration=125.98491123 podStartE2EDuration="2m5.98491123s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:42.983309614 +0000 UTC m=+145.379994138" watchObservedRunningTime="2025-11-28 16:12:42.98491123 +0000 UTC m=+145.381595754"
Nov 28 16:12:42 crc kubenswrapper[4909]: I1128 16:12:42.985794 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rwfb8" podStartSLOduration=8.985790305 podStartE2EDuration="8.985790305s" podCreationTimestamp="2025-11-28 16:12:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:42.901903099 +0000 UTC m=+145.298587613" watchObservedRunningTime="2025-11-28 16:12:42.985790305 +0000 UTC m=+145.382474829"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.000898 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-l2xpj" event={"ID":"e4c200b0-224e-4a58-af74-96a8568ddb3d","Type":"ContainerStarted","Data":"7617d35c019c412b373ffcc83806abb1e4113972c3c387f7991581428ac6ed21"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.001429 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-l2xpj"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.008760 4909 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2xpj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body=
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.008806 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2xpj" podUID="e4c200b0-224e-4a58-af74-96a8568ddb3d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.033200 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.034541 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.534521809 +0000 UTC m=+145.931206333 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.035839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" event={"ID":"05f40c85-8360-453c-80d9-907bb6db611b","Type":"ContainerStarted","Data":"c27c03d346b7d9d375fb2a28b2c092ca581cf9c1a702c237cc80378eef06b667"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.056507 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-dmgdn" podStartSLOduration=126.056489902 podStartE2EDuration="2m6.056489902s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.053837685 +0000 UTC m=+145.450522209" watchObservedRunningTime="2025-11-28 16:12:43.056489902 +0000 UTC m=+145.453174416"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.057027 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j4f6w" podStartSLOduration=126.057020757 podStartE2EDuration="2m6.057020757s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.019454765 +0000 UTC m=+145.416139289" watchObservedRunningTime="2025-11-28 16:12:43.057020757 +0000 UTC m=+145.453705271"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.122931 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" event={"ID":"c73706cd-a7f9-436c-88a9-0fa3120649aa","Type":"ContainerStarted","Data":"1dabd19e93762420111aefa0abf6a0446fc4f8309526a5cd422d7e4ee25c85a6"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.122983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" event={"ID":"c73706cd-a7f9-436c-88a9-0fa3120649aa","Type":"ContainerStarted","Data":"b6885431c39646f3dde1ad319cf7a80f71c4d5d636752b3db0cb9f91ab401635"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.123647 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.145187 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.145476 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.645465525 +0000 UTC m=+146.042150049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.165233 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" event={"ID":"f7ab1edf-70d0-4894-a7b1-ec68413d9b45","Type":"ContainerStarted","Data":"6bbd5d24260c097cd7fa9dd31df4f7c2e92389b07b654274f6b83556181f91ff"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.170325 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qjl8k" podStartSLOduration=126.17030614 podStartE2EDuration="2m6.17030614s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.115922384 +0000 UTC m=+145.512606908" watchObservedRunningTime="2025-11-28 16:12:43.17030614 +0000 UTC m=+145.566990664"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.205597 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" event={"ID":"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83","Type":"ContainerStarted","Data":"ff916a058969195c60d0b3a73ae6fa56b6c34611346ddf61a648cc646c041a96"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.225013 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-cml8b" podStartSLOduration=125.22497984500001 podStartE2EDuration="2m5.224979845s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.171737441 +0000 UTC m=+145.568421965" watchObservedRunningTime="2025-11-28 16:12:43.224979845 +0000 UTC m=+145.621664369"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.225951 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l5snl" podStartSLOduration=126.225944013 podStartE2EDuration="2m6.225944013s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.223365219 +0000 UTC m=+145.620049753" watchObservedRunningTime="2025-11-28 16:12:43.225944013 +0000 UTC m=+145.622628527"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.251479 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" event={"ID":"74a58b61-df24-4f27-8382-ff175aa2bb14","Type":"ContainerStarted","Data":"8bd9b9a79d3ea4873366f9dd60fd77310dc91f76a727a58ae344c5598b82b4a6"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.251858 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-k67zs" podStartSLOduration=127.251844659 podStartE2EDuration="2m7.251844659s" podCreationTimestamp="2025-11-28 16:10:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.2511885 +0000 UTC m=+145.647873024" watchObservedRunningTime="2025-11-28 16:12:43.251844659 +0000 UTC m=+145.648529183"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.252207 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.253310 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.753295431 +0000 UTC m=+146.149979955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.289563 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcpqp" event={"ID":"562fb423-75b3-4716-9370-bc090c7fbbee","Type":"ContainerStarted","Data":"b193ed90741daab3641400aaeeb06ee0e7949177e2c9141d6f47f1d0c53ffbe1"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.290098 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jcpqp"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.310824 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerStarted","Data":"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.311584 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.333922 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" podStartSLOduration=125.333909803 podStartE2EDuration="2m5.333909803s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.332049809 +0000 UTC m=+145.728734333" watchObservedRunningTime="2025-11-28 16:12:43.333909803 +0000 UTC m=+145.730594327"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.334181 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gpb8r" podStartSLOduration=125.334176281 podStartE2EDuration="2m5.334176281s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.281703949 +0000 UTC m=+145.678388473" watchObservedRunningTime="2025-11-28 16:12:43.334176281 +0000 UTC m=+145.730860795"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.334430 4909 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-pnjkw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body=
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.334455 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.342605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" event={"ID":"ba89323c-1e8c-4731-85d4-8b07766e3609","Type":"ContainerStarted","Data":"32b3771f5e891b335a38d973a5d9f684c3ec99d2a7b9997f41466ed7956cc38f"}
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.354395 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.355938 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.855927877 +0000 UTC m=+146.252612401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.366670 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" podStartSLOduration=125.366640806 podStartE2EDuration="2m5.366640806s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.36576073 +0000 UTC m=+145.762445254" watchObservedRunningTime="2025-11-28 16:12:43.366640806 +0000 UTC m=+145.763325330"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.367523 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9d46g"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.373758 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4p52q"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.455168 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.455381 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.955350661 +0000 UTC m=+146.352035185 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.455686 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.473285 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:43.973261497 +0000 UTC m=+146.369946021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.507778 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" podStartSLOduration=125.507761801 podStartE2EDuration="2m5.507761801s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.419963562 +0000 UTC m=+145.816648086" watchObservedRunningTime="2025-11-28 16:12:43.507761801 +0000 UTC m=+145.904446325"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.536301 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-l2xpj" podStartSLOduration=126.536285982 podStartE2EDuration="2m6.536285982s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.509877342 +0000 UTC m=+145.906561866" watchObservedRunningTime="2025-11-28 16:12:43.536285982 +0000 UTC m=+145.932970506"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.557036 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.557348 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.057332369 +0000 UTC m=+146.454016893 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.560144 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" podStartSLOduration=126.560127069 podStartE2EDuration="2m6.560127069s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.557558915 +0000 UTC m=+145.954243449" watchObservedRunningTime="2025-11-28 16:12:43.560127069 +0000 UTC m=+145.956811593"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.560980 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-bp2ss" podStartSLOduration=125.560949473 podStartE2EDuration="2m5.560949473s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.535972023 +0000 UTC m=+145.932656547" watchObservedRunningTime="2025-11-28 16:12:43.560949473 +0000 UTC m=+145.957633997"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.584798 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:43 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld
Nov 28 16:12:43 crc kubenswrapper[4909]: [+]process-running ok
Nov 28 16:12:43 crc kubenswrapper[4909]: healthz check failed
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.585175 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.654784 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jcpqp" podStartSLOduration=9.654768396 podStartE2EDuration="9.654768396s" podCreationTimestamp="2025-11-28 16:12:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.65354819 +0000 UTC m=+146.050232714" watchObservedRunningTime="2025-11-28 16:12:43.654768396 +0000 UTC m=+146.051452920"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.654920 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" podStartSLOduration=125.65491704 podStartE2EDuration="2m5.65491704s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.617150032 +0000 UTC m=+146.013834556" watchObservedRunningTime="2025-11-28 16:12:43.65491704 +0000 UTC m=+146.051601564"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.658129 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.658484 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.158471842 +0000 UTC m=+146.555156366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.693444 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-7xr8n" podStartSLOduration=126.693429199 podStartE2EDuration="2m6.693429199s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.69103264 +0000 UTC m=+146.087717164" watchObservedRunningTime="2025-11-28 16:12:43.693429199 +0000 UTC m=+146.090113723"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.759638 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.759995 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.259978466 +0000 UTC m=+146.656662990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.769943 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-b4wbn"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.838552 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-42h7p"]
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.839433 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.844927 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.861276 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.861636 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.361624654 +0000 UTC m=+146.758309178 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.870669 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-42h7p"]
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.962486 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.962744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.962783 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msv77\" (UniqueName: \"kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.962799 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:43 crc kubenswrapper[4909]: E1128 16:12:43.962913 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.462898252 +0000 UTC m=+146.859582776 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.979981 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rkbfc"]
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.981038 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rkbfc"
Nov 28 16:12:43 crc kubenswrapper[4909]: I1128 16:12:43.985055 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.007184 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rkbfc"]
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063590 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snwjp\" (UniqueName: \"kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063691 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063719 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063741 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msv77\" (UniqueName: \"kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063756 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063780 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.063818 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.064216 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.064725 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.064981 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.564970552 +0000 UTC m=+146.961655076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.089025 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msv77\" (UniqueName: \"kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77\") pod \"certified-operators-42h7p\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " pod="openshift-marketplace/certified-operators-42h7p"
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.159187 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"]
Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.160057 4909 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.164711 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.164989 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.165023 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snwjp\" (UniqueName: \"kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.165086 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.165481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.165550 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.665535829 +0000 UTC m=+147.062220353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.165821 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.179916 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.200410 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snwjp\" (UniqueName: \"kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp\") pod \"community-operators-rkbfc\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.266511 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"] Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.267155 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.267244 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cl95r\" (UniqueName: \"kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.267305 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.267328 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.267589 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.767578278 +0000 UTC m=+147.164262802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.326926 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.362376 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qbnrs" event={"ID":"74a58b61-df24-4f27-8382-ff175aa2bb14","Type":"ContainerStarted","Data":"bd70150a739a561f96dbb8e5722949ebae5fc3fb867dbab289e57917f0639b55"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.365545 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-65j2k"] Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.366419 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.370087 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.370309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.370339 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.370410 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cl95r\" (UniqueName: \"kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.371701 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.871683386 +0000 UTC m=+147.268367910 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.377765 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.379871 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.395380 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-65j2k"] Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.396411 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cl95r\" (UniqueName: \"kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r\") pod \"certified-operators-cqqvr\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.409307 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcpqp" event={"ID":"562fb423-75b3-4716-9370-bc090c7fbbee","Type":"ContainerStarted","Data":"5c580a34396ace571601e6ee06fb7446386b88cb6c9da276545d4579cb7d41dc"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.476250 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.476848 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.476972 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2vmw\" (UniqueName: \"kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.477177 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.477215 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.478200 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:44.978171964 +0000 UTC m=+147.374856488 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.525358 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" event={"ID":"5c7e3790-aa06-4b95-94a7-c41d909ec984","Type":"ContainerStarted","Data":"9ca18caf6ae1fa26699bb56797d009ac339aed7d0f61e9ff97d93335283eafaa"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.540827 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" event={"ID":"05f40c85-8360-453c-80d9-907bb6db611b","Type":"ContainerStarted","Data":"a1169918806500af93a9e5b88c8ecbbd6d90efbd3ef32265356d7f2b975f95ff"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.540864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" event={"ID":"05f40c85-8360-453c-80d9-907bb6db611b","Type":"ContainerStarted","Data":"fe262313bb32b7cff3367b9319d8f902fc89dfb3526a7b68529c452deb58ef91"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.542083 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4j2wt" event={"ID":"f7ab1edf-70d0-4894-a7b1-ec68413d9b45","Type":"ContainerStarted","Data":"1cccd0ccfce1c388801ac9347f1f78a4efde8aa0158cc8c38f5bf892d3e76358"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.567939 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" event={"ID":"0120f632-f92f-4cdc-a4c5-0d63471be0ef","Type":"ContainerStarted","Data":"6080bb20fead105e4318392baa419ea9a8d0900d51728fefb9b9418cd8789a72"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579587 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579787 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579841 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579871 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2vmw\" (UniqueName: 
\"kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579902 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579929 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579946 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.579967 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.580649 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.580991 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.080976595 +0000 UTC m=+147.477661119 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.581197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.582493 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.585439 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:44 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:44 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:44 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.585495 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.587828 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.589412 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.590535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.599910 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nrl4j" event={"ID":"7b62fa09-bcc3-4c5f-a6d4-91cb6c253b83","Type":"ContainerStarted","Data":"aae766ef9dd8f1f5a95e78141044541ae33d723a0072ffeff4e88d2033ea7726"} Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.606417 4909 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2xpj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.607018 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2xpj" podUID="e4c200b0-224e-4a58-af74-96a8568ddb3d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.618540 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" podStartSLOduration=126.618525697 podStartE2EDuration="2m6.618525697s" podCreationTimestamp="2025-11-28 16:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:44.617550348 +0000 UTC m=+147.014234872" watchObservedRunningTime="2025-11-28 16:12:44.618525697 +0000 UTC m=+147.015210221" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.620014 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" podStartSLOduration=127.620007109 podStartE2EDuration="2m7.620007109s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:44.560190056 +0000 UTC m=+146.956874600" watchObservedRunningTime="2025-11-28 16:12:44.620007109 +0000 UTC m=+147.016691633" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.620514 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2vmw\" (UniqueName: \"kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw\") pod \"community-operators-65j2k\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.671704 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.684793 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.686104 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.690693 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.190682035 +0000 UTC m=+147.587366559 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.770810 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.771220 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.786970 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.787220 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.787342 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.287322009 +0000 UTC m=+147.684006533 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.787485 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.787750 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.287742281 +0000 UTC m=+147.684426805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.888339 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.888909 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.388895075 +0000 UTC m=+147.785579599 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.913121 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-42h7p"] Nov 28 16:12:44 crc kubenswrapper[4909]: I1128 16:12:44.989450 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:44 crc kubenswrapper[4909]: E1128 16:12:44.989796 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.489784731 +0000 UTC m=+147.886469255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-p5p29" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.014646 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52118b39_f1b7_486d_a819_ae1f464d793d.slice/crio-5fe0e4acd00ddf6b5fcc7598a7eee3de0818bb3840bbf9ecf6a46492213f9ae6 WatchSource:0}: Error finding container 5fe0e4acd00ddf6b5fcc7598a7eee3de0818bb3840bbf9ecf6a46492213f9ae6: Status 404 returned error can't find the container with id 5fe0e4acd00ddf6b5fcc7598a7eee3de0818bb3840bbf9ecf6a46492213f9ae6 Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.033241 4909 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.071752 4909 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T16:12:45.033260633Z","Handler":null,"Name":""} Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.090407 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:45 crc kubenswrapper[4909]: E1128 16:12:45.090834 4909 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:45.590817811 +0000 UTC m=+147.987502335 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.109948 4909 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.109976 4909 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.130950 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"] Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.159803 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14032fa5_0a63_49fb_b785_4478ed116450.slice/crio-80e27b1a0957fa05969543964b9a68ae7d5d81d9b41701fbe4c20e4ec3d602db WatchSource:0}: Error finding container 80e27b1a0957fa05969543964b9a68ae7d5d81d9b41701fbe4c20e4ec3d602db: Status 404 returned error can't find the container with id 80e27b1a0957fa05969543964b9a68ae7d5d81d9b41701fbe4c20e4ec3d602db Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.161402 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rkbfc"] Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.179111 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode831d096_86aa_4351_8d67_bdf81194727c.slice/crio-23670d109b89358f2efd2e124f2f82e1e9fc43db526410d2dfb3e58165a33e5a WatchSource:0}: Error finding container 23670d109b89358f2efd2e124f2f82e1e9fc43db526410d2dfb3e58165a33e5a: Status 404 returned error can't find the container with id 23670d109b89358f2efd2e124f2f82e1e9fc43db526410d2dfb3e58165a33e5a Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.194414 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.204241 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.204281 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.206261 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-65j2k"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.279183 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-p5p29\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") " pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.288643 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.295356 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.309191 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.545754 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-e695f42f8b46ca817bc6873c5263aee42dae6113928b003bf3cdf9517b8cd35e WatchSource:0}: Error finding container e695f42f8b46ca817bc6873c5263aee42dae6113928b003bf3cdf9517b8cd35e: Status 404 returned error can't find the container with id e695f42f8b46ca817bc6873c5263aee42dae6113928b003bf3cdf9517b8cd35e
Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.545992 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-f2be5b5d239f8179dd1bdbf7b9684fc614da10639765c3024b1b4b4154fbdd2b WatchSource:0}: Error finding container f2be5b5d239f8179dd1bdbf7b9684fc614da10639765c3024b1b4b4154fbdd2b: Status 404 returned error can't find the container with id f2be5b5d239f8179dd1bdbf7b9684fc614da10639765c3024b1b4b4154fbdd2b
Nov 28 16:12:45 crc kubenswrapper[4909]: W1128 16:12:45.560177 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-2e586ded4bff1870d4e290bbf4df4254f87a76296caeed16d273a96fe725a524 WatchSource:0}: Error finding container 2e586ded4bff1870d4e290bbf4df4254f87a76296caeed16d273a96fe725a524: Status 404 returned error can't find the container with id 2e586ded4bff1870d4e290bbf4df4254f87a76296caeed16d273a96fe725a524
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.582650 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:45 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld
Nov 28 16:12:45 crc kubenswrapper[4909]: [+]process-running ok
Nov 28 16:12:45 crc kubenswrapper[4909]: healthz check failed
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.582708 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.627928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f2be5b5d239f8179dd1bdbf7b9684fc614da10639765c3024b1b4b4154fbdd2b"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.636906 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"2e586ded4bff1870d4e290bbf4df4254f87a76296caeed16d273a96fe725a524"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.647009 4909 generic.go:334] "Generic (PLEG): container finished" podID="0731ea3e-3015-4311-88f4-bcd1f9f8204e" containerID="55e25c87ad488a0ee230737508f73167846e286147ac4f3da75ce9603ff85aad" exitCode=0
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.647082 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" event={"ID":"0731ea3e-3015-4311-88f4-bcd1f9f8204e","Type":"ContainerDied","Data":"55e25c87ad488a0ee230737508f73167846e286147ac4f3da75ce9603ff85aad"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.660931 4909 generic.go:334] "Generic (PLEG): container finished" podID="52118b39-f1b7-486d-a819-ae1f464d793d" containerID="d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17" exitCode=0
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.661233 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerDied","Data":"d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.661268 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerStarted","Data":"5fe0e4acd00ddf6b5fcc7598a7eee3de0818bb3840bbf9ecf6a46492213f9ae6"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.662599 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.666996 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"e695f42f8b46ca817bc6873c5263aee42dae6113928b003bf3cdf9517b8cd35e"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.680432 4909 generic.go:334] "Generic (PLEG): container finished" podID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerID="bc3116d54b6a95bef18b47a8a906a4f3bed525c0d2c300837a2f87ffa77680d5" exitCode=0
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.680508 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerDied","Data":"bc3116d54b6a95bef18b47a8a906a4f3bed525c0d2c300837a2f87ffa77680d5"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.680531 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerStarted","Data":"9df10a3d6c1b760ab3a9abceb27b299b37c7388ea0fc5c1d0544e5e5e6cabe9b"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.708537 4909 generic.go:334] "Generic (PLEG): container finished" podID="14032fa5-0a63-49fb-b785-4478ed116450" containerID="a09ede08a7250ab92fc300a282131a492edbc1f863c0aca3f7605225ffe0fdce" exitCode=0
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.708638 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerDied","Data":"a09ede08a7250ab92fc300a282131a492edbc1f863c0aca3f7605225ffe0fdce"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.708682 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerStarted","Data":"80e27b1a0957fa05969543964b9a68ae7d5d81d9b41701fbe4c20e4ec3d602db"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.709187 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.709959 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.719644 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.719703 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.749358 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" event={"ID":"05f40c85-8360-453c-80d9-907bb6db611b","Type":"ContainerStarted","Data":"747a17e062d617b9b0a742f9136d6bc6117d8b69c9340b83ef8501ad7b7d795d"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.752699 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.752980 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.761956 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.763147 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.768021 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.791505 4909 generic.go:334] "Generic (PLEG): container finished" podID="e831d096-86aa-4351-8d67-bdf81194727c" containerID="8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc" exitCode=0
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.791717 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerDied","Data":"8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.791748 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerStarted","Data":"23670d109b89358f2efd2e124f2f82e1e9fc43db526410d2dfb3e58165a33e5a"}
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.793424 4909 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2xpj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body=
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.793471 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2xpj" podUID="e4c200b0-224e-4a58-af74-96a8568ddb3d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.806977 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"]
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.807615 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.807644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b58m\" (UniqueName: \"kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.807722 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.807741 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.807759 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.816017 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mg8mw"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.854696 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-xz6qx" podStartSLOduration=11.854676595 podStartE2EDuration="11.854676595s" podCreationTimestamp="2025-11-28 16:12:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:45.850196296 +0000 UTC m=+148.246880830" watchObservedRunningTime="2025-11-28 16:12:45.854676595 +0000 UTC m=+148.251361119"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.908918 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.909267 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b58m\" (UniqueName: \"kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.909360 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.909379 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.909396 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.910203 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.910708 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.910960 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.910969 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.940576 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b58m\" (UniqueName: \"kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m\") pod \"redhat-marketplace-9p7bm\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " pod="openshift-marketplace/redhat-marketplace-9p7bm"
Nov 28 16:12:45 crc kubenswrapper[4909]: I1128 16:12:45.951595 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") "
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.042025 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.118841 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.151783 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.153854 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.162107 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.217320 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm8r6\" (UniqueName: \"kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.217445 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.217472 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.270887 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.318511 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.318543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.318596 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm8r6\" (UniqueName: \"kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " 
pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.319674 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.320358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.337084 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm8r6\" (UniqueName: \"kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6\") pod \"redhat-marketplace-j2vhf\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.384717 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.482848 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.553017 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.553363 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.560205 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.586219 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:46 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:46 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:46 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.586280 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.638769 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.639266 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.639287 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.665147 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.668713 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.687778 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.687814 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.689500 4909 patch_prober.go:28] interesting pod/console-f9d7485db-qjf9t container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.689532 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qjf9t" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerName="console" probeResult="failure" output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.798840 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ce338213-07e1-4e6c-bbaf-a99f4a056fdf","Type":"ContainerStarted","Data":"8f4969029621c690ec450f9fbd3f646a4c354f550890b23cff12878291b22020"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.800212 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"569a8e8ac24d227c726aaf61c101ae2b622aecb894e624dbb829a8619b6d3543"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.800314 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.801504 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3868d2872c5524dcc5392db4a4af829cb686ef05fbf72d982eb9977a5b3b900d"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.803131 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" event={"ID":"cbee12e6-b82e-4451-8292-dca1540e2ab5","Type":"ContainerStarted","Data":"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.803164 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" event={"ID":"cbee12e6-b82e-4451-8292-dca1540e2ab5","Type":"ContainerStarted","Data":"013197be78954c0ec7b8f6584bebf24eff562a7b9f777f6a805b5fa8774256ec"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.803271 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 
16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.804496 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ee6087dee9a1d86dd50b0b7c799fff71d4ca20ac638471016c6a61b3705d4cf3"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.805405 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerStarted","Data":"0e9d1df6205bf1a741be6102ddde6e90e5331bad08d370b21c7ccb714f8f83d6"} Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.813787 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zqhkp" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.814904 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-ld6fg" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.914057 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.922186 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x4wv5" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.962610 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" podStartSLOduration=129.962588929 podStartE2EDuration="2m9.962588929s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:46.951930502 +0000 UTC m=+149.348615036" watchObservedRunningTime="2025-11-28 16:12:46.962588929 +0000 UTC m=+149.359273453" Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.982622 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:12:46 crc kubenswrapper[4909]: I1128 16:12:46.993613 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.002216 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.016410 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.150878 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.151201 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s2pn\" (UniqueName: \"kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.151251 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.252322 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.252386 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s2pn\" (UniqueName: \"kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.252425 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.253242 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.253293 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " 
pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.263082 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.278945 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s2pn\" (UniqueName: \"kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn\") pod \"redhat-operators-sz62r\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.357398 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume\") pod \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.357449 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9xdg\" (UniqueName: \"kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg\") pod \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.357468 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume\") pod \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\" (UID: \"0731ea3e-3015-4311-88f4-bcd1f9f8204e\") " Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.358289 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.358916 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume" (OuterVolumeSpecName: "config-volume") pod "0731ea3e-3015-4311-88f4-bcd1f9f8204e" (UID: "0731ea3e-3015-4311-88f4-bcd1f9f8204e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:12:47 crc kubenswrapper[4909]: E1128 16:12:47.359648 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0731ea3e-3015-4311-88f4-bcd1f9f8204e" containerName="collect-profiles" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.359691 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0731ea3e-3015-4311-88f4-bcd1f9f8204e" containerName="collect-profiles" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.361468 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0731ea3e-3015-4311-88f4-bcd1f9f8204e" containerName="collect-profiles" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.362357 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.364918 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg" (OuterVolumeSpecName: "kube-api-access-d9xdg") pod "0731ea3e-3015-4311-88f4-bcd1f9f8204e" (UID: "0731ea3e-3015-4311-88f4-bcd1f9f8204e"). InnerVolumeSpecName "kube-api-access-d9xdg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.368230 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0731ea3e-3015-4311-88f4-bcd1f9f8204e" (UID: "0731ea3e-3015-4311-88f4-bcd1f9f8204e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.372034 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.396157 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.433995 4909 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2xpj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.434016 4909 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2xpj container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.434053 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2xpj" podUID="e4c200b0-224e-4a58-af74-96a8568ddb3d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.434061 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-l2xpj" podUID="e4c200b0-224e-4a58-af74-96a8568ddb3d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.461283 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0731ea3e-3015-4311-88f4-bcd1f9f8204e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.461316 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9xdg\" (UniqueName: \"kubernetes.io/projected/0731ea3e-3015-4311-88f4-bcd1f9f8204e-kube-api-access-d9xdg\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.461326 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0731ea3e-3015-4311-88f4-bcd1f9f8204e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.564294 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.564558 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khkrc\" (UniqueName: \"kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.564579 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.577230 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.580009 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:47 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:47 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:47 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.580054 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.671080 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.671127 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khkrc\" (UniqueName: \"kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.671145 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.672167 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.672430 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content\") pod 
\"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.705806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khkrc\" (UniqueName: \"kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc\") pod \"redhat-operators-sv5rc\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.816759 4909 generic.go:334] "Generic (PLEG): container finished" podID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerID="221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0" exitCode=0 Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.816898 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerDied","Data":"221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0"} Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.826633 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce338213-07e1-4e6c-bbaf-a99f4a056fdf" containerID="a2a30bad733787dc8ce000b7eb09c8618321c3de2df683b4e6d5ba819b948610" exitCode=0 Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.826705 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ce338213-07e1-4e6c-bbaf-a99f4a056fdf","Type":"ContainerDied","Data":"a2a30bad733787dc8ce000b7eb09c8618321c3de2df683b4e6d5ba819b948610"} Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.830613 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" event={"ID":"0731ea3e-3015-4311-88f4-bcd1f9f8204e","Type":"ContainerDied","Data":"b554c96afb7772b34e8637a199929dc98b13cf0c5ffae266ddd9cc4c93b88898"} Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.830681 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b554c96afb7772b34e8637a199929dc98b13cf0c5ffae266ddd9cc4c93b88898" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.831079 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k" Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.835694 4909 generic.go:334] "Generic (PLEG): container finished" podID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerID="20f3faf3e642c4325388a582eea4b696b7040d14befbc2c253efc76e895db23f" exitCode=0 Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.836377 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerDied","Data":"20f3faf3e642c4325388a582eea4b696b7040d14befbc2c253efc76e895db23f"} Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.836436 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerStarted","Data":"2c2624940e5ad5678a2356af00919b83f643dae1fa581f978353a11b322b4b29"} Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.879072 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:12:47 crc kubenswrapper[4909]: I1128 16:12:47.986528 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:12:48 crc kubenswrapper[4909]: I1128 16:12:48.453364 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:12:48 crc kubenswrapper[4909]: W1128 16:12:48.482981 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56df5b0b_d5f6_4167_9f41_c7bd58a661f4.slice/crio-552f0ce1b0a8bed3c45060e79bcf57f0d461cf4ccdcf61acefc2e3104bcb116f WatchSource:0}: Error finding container 552f0ce1b0a8bed3c45060e79bcf57f0d461cf4ccdcf61acefc2e3104bcb116f: Status 404 returned error can't find the container with id 552f0ce1b0a8bed3c45060e79bcf57f0d461cf4ccdcf61acefc2e3104bcb116f Nov 28 16:12:48 crc kubenswrapper[4909]: I1128 16:12:48.581067 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:48 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:48 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:48 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:48 crc kubenswrapper[4909]: I1128 16:12:48.581454 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:48 crc kubenswrapper[4909]: I1128 16:12:48.847561 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerStarted","Data":"90c03f5224ea4d34e2edcfe82e970fba8733c1a77c17c68987df0cd2d67bfa64"} Nov 28 16:12:48 crc kubenswrapper[4909]: I1128 16:12:48.850490 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerStarted","Data":"552f0ce1b0a8bed3c45060e79bcf57f0d461cf4ccdcf61acefc2e3104bcb116f"} Nov 28 
16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.045679 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.235793 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir\") pod \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.236244 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access\") pod \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\" (UID: \"ce338213-07e1-4e6c-bbaf-a99f4a056fdf\") " Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.237299 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ce338213-07e1-4e6c-bbaf-a99f4a056fdf" (UID: "ce338213-07e1-4e6c-bbaf-a99f4a056fdf"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.242215 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ce338213-07e1-4e6c-bbaf-a99f4a056fdf" (UID: "ce338213-07e1-4e6c-bbaf-a99f4a056fdf"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.337649 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.337696 4909 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce338213-07e1-4e6c-bbaf-a99f4a056fdf-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.581123 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:49 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:49 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:49 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.581177 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.868157 4909 generic.go:334] "Generic (PLEG): container finished" podID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerID="7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6" exitCode=0 Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.868215 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerDied","Data":"7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6"} Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.875139 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ce338213-07e1-4e6c-bbaf-a99f4a056fdf","Type":"ContainerDied","Data":"8f4969029621c690ec450f9fbd3f646a4c354f550890b23cff12878291b22020"} Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.875173 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f4969029621c690ec450f9fbd3f646a4c354f550890b23cff12878291b22020" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.875228 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.887828 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerID="e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb" exitCode=0 Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.887878 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerDied","Data":"e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb"} Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.910896 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:12:49 crc kubenswrapper[4909]: I1128 16:12:49.910934 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.453641 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:12:50 crc kubenswrapper[4909]: E1128 16:12:50.454015 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce338213-07e1-4e6c-bbaf-a99f4a056fdf" containerName="pruner" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.454032 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce338213-07e1-4e6c-bbaf-a99f4a056fdf" containerName="pruner" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.454180 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce338213-07e1-4e6c-bbaf-a99f4a056fdf" containerName="pruner" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.457465 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.459557 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.468903 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.469350 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.554040 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.554083 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.579317 4909 patch_prober.go:28] interesting pod/router-default-5444994796-g6mgs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:50 crc kubenswrapper[4909]: [-]has-synced failed: reason withheld Nov 28 16:12:50 crc kubenswrapper[4909]: [+]process-running ok Nov 28 16:12:50 crc kubenswrapper[4909]: healthz check failed Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.579376 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-g6mgs" podUID="a56d38d4-9ee2-46f0-8fc8-3edbc68647d4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.655713 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.655767 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.655832 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.674362 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4909]: I1128 16:12:50.786123 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:51 crc kubenswrapper[4909]: I1128 16:12:51.580177 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:51 crc kubenswrapper[4909]: I1128 16:12:51.583402 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-g6mgs" Nov 28 16:12:52 crc kubenswrapper[4909]: I1128 16:12:52.629228 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jcpqp" Nov 28 16:12:56 crc kubenswrapper[4909]: I1128 16:12:56.692295 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:56 crc kubenswrapper[4909]: I1128 16:12:56.697517 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-qjf9t" Nov 28 16:12:57 crc kubenswrapper[4909]: I1128 16:12:57.440876 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-l2xpj" Nov 28 16:12:59 crc kubenswrapper[4909]: I1128 16:12:59.520525 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:59 crc kubenswrapper[4909]: I1128 16:12:59.534846 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ffceca0e-d9b5-484f-8753-5e0269eec811-metrics-certs\") pod \"network-metrics-daemon-8rjn2\" (UID: \"ffceca0e-d9b5-484f-8753-5e0269eec811\") " pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:12:59 crc kubenswrapper[4909]: I1128 16:12:59.747418 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8rjn2" Nov 28 16:13:05 crc kubenswrapper[4909]: I1128 16:13:05.298116 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" Nov 28 16:13:17 crc kubenswrapper[4909]: I1128 16:13:17.518239 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hstbg" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.531669 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.531820 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cl95r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-cqqvr_openshift-marketplace(14032fa5-0a63-49fb-b785-4478ed116450): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.533222 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-cqqvr" podUID="14032fa5-0a63-49fb-b785-4478ed116450" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.628408 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.628541 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-msv77,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-42h7p_openshift-marketplace(52118b39-f1b7-486d-a819-ae1f464d793d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:17 crc kubenswrapper[4909]: E1128 16:13:17.629725 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-42h7p" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" Nov 28 16:13:19 crc kubenswrapper[4909]: I1128 16:13:19.911330 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:13:19 crc kubenswrapper[4909]: I1128 16:13:19.911787 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:13:21 crc kubenswrapper[4909]: E1128 16:13:21.484230 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-cqqvr" podUID="14032fa5-0a63-49fb-b785-4478ed116450" Nov 28 16:13:21 crc kubenswrapper[4909]: E1128 16:13:21.484320 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling 
image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-42h7p" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" Nov 28 16:13:24 crc kubenswrapper[4909]: I1128 16:13:24.911185 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:13:25 crc kubenswrapper[4909]: E1128 16:13:25.609951 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 16:13:25 crc kubenswrapper[4909]: E1128 16:13:25.610116 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j2vmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-65j2k_openshift-marketplace(55ed08a1-95ba-49ec-8c26-2da4a2774920): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:25 crc kubenswrapper[4909]: E1128 16:13:25.611316 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-65j2k" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.115555 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-65j2k" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" Nov 28 16:13:26 crc kubenswrapper[4909]: I1128 16:13:26.299339 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:13:26 crc kubenswrapper[4909]: I1128 16:13:26.336064 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-8rjn2"] Nov 28 16:13:26 crc kubenswrapper[4909]: W1128 16:13:26.340347 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffceca0e_d9b5_484f_8753_5e0269eec811.slice/crio-b669120d55520c259f7e51a6eba7d9b4ab5ae4edf747142e7ccd6bcdad5fb777 WatchSource:0}: Error finding container b669120d55520c259f7e51a6eba7d9b4ab5ae4edf747142e7ccd6bcdad5fb777: Status 404 returned error can't find the container with id b669120d55520c259f7e51a6eba7d9b4ab5ae4edf747142e7ccd6bcdad5fb777 Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.563240 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.563371 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-snwjp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-rkbfc_openshift-marketplace(e831d096-86aa-4351-8d67-bdf81194727c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.564505 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-rkbfc" podUID="e831d096-86aa-4351-8d67-bdf81194727c" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.785728 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.786196 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bm8r6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-j2vhf_openshift-marketplace(f208265d-6857-409d-8bc1-e2e9d87f754b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.787537 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-j2vhf" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.866066 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.866225 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6b58m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-9p7bm_openshift-marketplace(4e3c8494-44f8-475d-8ae3-2613649d6c73): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:26 crc kubenswrapper[4909]: E1128 16:13:26.867423 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-9p7bm" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.119644 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a2a266d-04bd-49b7-8dff-9ee944432a24","Type":"ContainerStarted","Data":"63227ed8eaef69f5322d64a30607ae05b04fb269b303b15270621e7f36fb8aaf"} Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.119718 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a2a266d-04bd-49b7-8dff-9ee944432a24","Type":"ContainerStarted","Data":"f8f659c0de6f9f0c8407e2f0dd472d30e8af9c7586316734b45a7b56627c9fc1"} Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.123280 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" event={"ID":"ffceca0e-d9b5-484f-8753-5e0269eec811","Type":"ContainerStarted","Data":"4efc62198f65f3b0e2af9a58e7d17695393cc76eb00b347cad080be46b3df58b"} Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.123379 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" event={"ID":"ffceca0e-d9b5-484f-8753-5e0269eec811","Type":"ContainerStarted","Data":"b669120d55520c259f7e51a6eba7d9b4ab5ae4edf747142e7ccd6bcdad5fb777"} Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.134546 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=37.134527744 podStartE2EDuration="37.134527744s" podCreationTimestamp="2025-11-28 
16:12:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:13:27.134292918 +0000 UTC m=+189.530977442" watchObservedRunningTime="2025-11-28 16:13:27.134527744 +0000 UTC m=+189.531212268" Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.839771 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.840827 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.849835 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.935912 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:27 crc kubenswrapper[4909]: I1128 16:13:27.936043 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.036869 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.036945 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.037034 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.063592 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.130057 4909 generic.go:334] "Generic (PLEG): container finished" podID="3a2a266d-04bd-49b7-8dff-9ee944432a24" containerID="63227ed8eaef69f5322d64a30607ae05b04fb269b303b15270621e7f36fb8aaf" exitCode=0 Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.130105 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a2a266d-04bd-49b7-8dff-9ee944432a24","Type":"ContainerDied","Data":"63227ed8eaef69f5322d64a30607ae05b04fb269b303b15270621e7f36fb8aaf"} Nov 28 16:13:28 crc kubenswrapper[4909]: I1128 16:13:28.159550 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:29 crc kubenswrapper[4909]: E1128 16:13:29.656733 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-rkbfc" podUID="e831d096-86aa-4351-8d67-bdf81194727c" Nov 28 16:13:29 crc kubenswrapper[4909]: E1128 16:13:29.656746 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-9p7bm" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" Nov 28 16:13:29 crc kubenswrapper[4909]: E1128 16:13:29.658807 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-j2vhf" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.738327 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.761690 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir\") pod \"3a2a266d-04bd-49b7-8dff-9ee944432a24\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.761858 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access\") pod \"3a2a266d-04bd-49b7-8dff-9ee944432a24\" (UID: \"3a2a266d-04bd-49b7-8dff-9ee944432a24\") " Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.761876 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3a2a266d-04bd-49b7-8dff-9ee944432a24" (UID: "3a2a266d-04bd-49b7-8dff-9ee944432a24"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.762071 4909 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2a266d-04bd-49b7-8dff-9ee944432a24-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.785904 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3a2a266d-04bd-49b7-8dff-9ee944432a24" (UID: "3a2a266d-04bd-49b7-8dff-9ee944432a24"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.863018 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2a266d-04bd-49b7-8dff-9ee944432a24-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:29 crc kubenswrapper[4909]: I1128 16:13:29.881403 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.142001 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.142177 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a2a266d-04bd-49b7-8dff-9ee944432a24","Type":"ContainerDied","Data":"f8f659c0de6f9f0c8407e2f0dd472d30e8af9c7586316734b45a7b56627c9fc1"} Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.142376 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8f659c0de6f9f0c8407e2f0dd472d30e8af9c7586316734b45a7b56627c9fc1" Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.144938 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerStarted","Data":"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda"} Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.146477 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerStarted","Data":"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14"} Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.151618 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8rjn2" event={"ID":"ffceca0e-d9b5-484f-8753-5e0269eec811","Type":"ContainerStarted","Data":"ea2ded060fd799d0fac185a84efd1cb48e18166da28ef64f08af01a9a9270885"} Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.155035 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe05bf28-f2f6-4b61-8a80-15e9a408ed15","Type":"ContainerStarted","Data":"442593d80255372c8173c24acd2c0b5f690fa44aec37d3f6851b4c8fd2cfcce9"} Nov 28 16:13:30 crc kubenswrapper[4909]: I1128 16:13:30.177981 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-8rjn2" podStartSLOduration=173.177962042 podStartE2EDuration="2m53.177962042s" podCreationTimestamp="2025-11-28 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:13:30.174440431 +0000 UTC m=+192.571124975" watchObservedRunningTime="2025-11-28 16:13:30.177962042 +0000 UTC m=+192.574646566" Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.162497 4909 generic.go:334] "Generic (PLEG): container finished" podID="fe05bf28-f2f6-4b61-8a80-15e9a408ed15" containerID="e734029e0feec31b71aa988cec27cce9b6cf01f20b1bbc34aeb60ed88bd767ff" exitCode=0 Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.162535 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe05bf28-f2f6-4b61-8a80-15e9a408ed15","Type":"ContainerDied","Data":"e734029e0feec31b71aa988cec27cce9b6cf01f20b1bbc34aeb60ed88bd767ff"} Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.165961 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerID="6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda" exitCode=0 Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.166013 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerDied","Data":"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda"} Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.168811 4909 generic.go:334] "Generic (PLEG): container finished" podID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerID="ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14" exitCode=0 Nov 28 16:13:31 crc kubenswrapper[4909]: I1128 16:13:31.168883 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerDied","Data":"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14"} Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.177322 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerStarted","Data":"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7"} Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.180613 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerStarted","Data":"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613"} Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.196949 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sz62r" podStartSLOduration=4.39910666 podStartE2EDuration="46.196929059s" podCreationTimestamp="2025-11-28 16:12:46 +0000 UTC" firstStartedPulling="2025-11-28 16:12:49.891296952 +0000 UTC m=+152.287981476" lastFinishedPulling="2025-11-28 16:13:31.689119311 +0000 UTC m=+194.085803875" observedRunningTime="2025-11-28 16:13:32.195780946 +0000 UTC m=+194.592465490" watchObservedRunningTime="2025-11-28 16:13:32.196929059 +0000 UTC m=+194.593613623" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.218278 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sv5rc" podStartSLOduration=3.356181054 podStartE2EDuration="45.218267464s" podCreationTimestamp="2025-11-28 16:12:47 +0000 UTC" firstStartedPulling="2025-11-28 16:12:49.869729611 +0000 UTC m=+152.266414135" lastFinishedPulling="2025-11-28 16:13:31.731816021 +0000 UTC m=+194.128500545" observedRunningTime="2025-11-28 16:13:32.214928588 +0000 UTC m=+194.611613142" watchObservedRunningTime="2025-11-28 16:13:32.218267464 +0000 UTC m=+194.614951988" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.418214 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.494357 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access\") pod \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.494459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir\") pod \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\" (UID: \"fe05bf28-f2f6-4b61-8a80-15e9a408ed15\") " Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.494570 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "fe05bf28-f2f6-4b61-8a80-15e9a408ed15" (UID: "fe05bf28-f2f6-4b61-8a80-15e9a408ed15"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.494730 4909 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.502771 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "fe05bf28-f2f6-4b61-8a80-15e9a408ed15" (UID: "fe05bf28-f2f6-4b61-8a80-15e9a408ed15"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.595909 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe05bf28-f2f6-4b61-8a80-15e9a408ed15-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.640100 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:32 crc kubenswrapper[4909]: E1128 16:13:32.640414 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe05bf28-f2f6-4b61-8a80-15e9a408ed15" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.640435 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe05bf28-f2f6-4b61-8a80-15e9a408ed15" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: E1128 16:13:32.640450 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2a266d-04bd-49b7-8dff-9ee944432a24" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.640459 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2a266d-04bd-49b7-8dff-9ee944432a24" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.640607 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe05bf28-f2f6-4b61-8a80-15e9a408ed15" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.640636 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a2a266d-04bd-49b7-8dff-9ee944432a24" containerName="pruner" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.641084 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.651856 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.697090 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.697150 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.697183 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.798856 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc 
kubenswrapper[4909]: I1128 16:13:32.798915 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.798949 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.798992 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.799055 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.827502 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access\") pod \"installer-9-crc\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:32 crc kubenswrapper[4909]: I1128 16:13:32.956225 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:33 crc kubenswrapper[4909]: I1128 16:13:33.189828 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe05bf28-f2f6-4b61-8a80-15e9a408ed15","Type":"ContainerDied","Data":"442593d80255372c8173c24acd2c0b5f690fa44aec37d3f6851b4c8fd2cfcce9"} Nov 28 16:13:33 crc kubenswrapper[4909]: I1128 16:13:33.189875 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="442593d80255372c8173c24acd2c0b5f690fa44aec37d3f6851b4c8fd2cfcce9" Nov 28 16:13:33 crc kubenswrapper[4909]: I1128 16:13:33.189909 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:33 crc kubenswrapper[4909]: I1128 16:13:33.434996 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:33 crc kubenswrapper[4909]: W1128 16:13:33.441769 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3ff86d88_1ce0_4571_be27_bde40c65a82e.slice/crio-6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d WatchSource:0}: Error finding container 6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d: Status 404 returned error can't find the container with id 6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d Nov 28 16:13:34 crc kubenswrapper[4909]: I1128 16:13:34.197407 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3ff86d88-1ce0-4571-be27-bde40c65a82e","Type":"ContainerStarted","Data":"aedeec87b186fd98fa6513a40b47135d3169f23f135696528fc62c0c802aad81"} Nov 28 16:13:34 crc kubenswrapper[4909]: I1128 16:13:34.197682 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3ff86d88-1ce0-4571-be27-bde40c65a82e","Type":"ContainerStarted","Data":"6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d"} Nov 28 16:13:34 crc kubenswrapper[4909]: I1128 16:13:34.212185 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.21216834 podStartE2EDuration="2.21216834s" podCreationTimestamp="2025-11-28 16:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:13:34.209944526 +0000 UTC m=+196.606629050" watchObservedRunningTime="2025-11-28 16:13:34.21216834 +0000 UTC m=+196.608852864" Nov 28 16:13:36 crc kubenswrapper[4909]: I1128 16:13:36.152202 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"] Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.215315 4909 generic.go:334] "Generic (PLEG): container finished" podID="14032fa5-0a63-49fb-b785-4478ed116450" containerID="7c40aca941204c74348bb636828be29879af3451e478d9c995f5f95c6d66e6fd" exitCode=0 Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.215395 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerDied","Data":"7c40aca941204c74348bb636828be29879af3451e478d9c995f5f95c6d66e6fd"} Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.219281 4909 generic.go:334] "Generic (PLEG): container finished" podID="52118b39-f1b7-486d-a819-ae1f464d793d" containerID="85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272" exitCode=0 Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.219330 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerDied","Data":"85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272"} Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.396284 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.396422 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.987443 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:37 crc kubenswrapper[4909]: I1128 16:13:37.988058 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:38 crc kubenswrapper[4909]: I1128 16:13:38.326803 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:38 crc kubenswrapper[4909]: I1128 16:13:38.363446 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:38 crc kubenswrapper[4909]: I1128 16:13:38.461649 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sz62r" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="registry-server" probeResult="failure" output=< Nov 28 16:13:38 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 16:13:38 crc kubenswrapper[4909]: > Nov 28 16:13:39 crc kubenswrapper[4909]: I1128 16:13:39.235205 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerStarted","Data":"9385d4fb1a573c058f506b40dba6e1eda353a9ff1a7dfe73c90f37f4b4db637b"} Nov 28 16:13:39 crc kubenswrapper[4909]: I1128 16:13:39.237217 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerStarted","Data":"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897"} Nov 28 16:13:39 crc kubenswrapper[4909]: I1128 16:13:39.277396 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cqqvr" podStartSLOduration=2.578525974 podStartE2EDuration="55.277379152s" podCreationTimestamp="2025-11-28 16:12:44 +0000 UTC" firstStartedPulling="2025-11-28 16:12:45.719254364 +0000 UTC m=+148.115938888" lastFinishedPulling="2025-11-28 16:13:38.418107542 +0000 UTC m=+200.814792066" observedRunningTime="2025-11-28 16:13:39.252821535 +0000 UTC m=+201.649506059" watchObservedRunningTime="2025-11-28 16:13:39.277379152 +0000 UTC m=+201.674063686" Nov 28 16:13:40 crc kubenswrapper[4909]: I1128 16:13:40.245824 4909 generic.go:334] "Generic (PLEG): container finished" podID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerID="405fdc32bcb5f833a33c6681dda4f7e064f104693d375e9ec308992cbaca745c" exitCode=0 Nov 28 16:13:40 crc kubenswrapper[4909]: I1128 16:13:40.245886 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerDied","Data":"405fdc32bcb5f833a33c6681dda4f7e064f104693d375e9ec308992cbaca745c"} Nov 28 16:13:40 crc kubenswrapper[4909]: I1128 16:13:40.263978 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-42h7p" podStartSLOduration=4.467006378 podStartE2EDuration="57.263960298s" podCreationTimestamp="2025-11-28 16:12:43 +0000 UTC" firstStartedPulling="2025-11-28 16:12:45.662401476 +0000 UTC m=+148.059086000" lastFinishedPulling="2025-11-28 
16:13:38.459355406 +0000 UTC m=+200.856039920" observedRunningTime="2025-11-28 16:13:39.276255742 +0000 UTC m=+201.672940276" watchObservedRunningTime="2025-11-28 16:13:40.263960298 +0000 UTC m=+202.660644832" Nov 28 16:13:40 crc kubenswrapper[4909]: I1128 16:13:40.775231 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:13:40 crc kubenswrapper[4909]: I1128 16:13:40.775838 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sv5rc" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="registry-server" containerID="cri-o://40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613" gracePeriod=2 Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.081477 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.204297 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities\") pod \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.204349 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khkrc\" (UniqueName: \"kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc\") pod \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.204385 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content\") pod \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\" (UID: \"56df5b0b-d5f6-4167-9f41-c7bd58a661f4\") " Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.205619 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities" (OuterVolumeSpecName: "utilities") pod "56df5b0b-d5f6-4167-9f41-c7bd58a661f4" (UID: "56df5b0b-d5f6-4167-9f41-c7bd58a661f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.213949 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc" (OuterVolumeSpecName: "kube-api-access-khkrc") pod "56df5b0b-d5f6-4167-9f41-c7bd58a661f4" (UID: "56df5b0b-d5f6-4167-9f41-c7bd58a661f4"). InnerVolumeSpecName "kube-api-access-khkrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.252522 4909 generic.go:334] "Generic (PLEG): container finished" podID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerID="40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613" exitCode=0 Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.252585 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sv5rc" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.252584 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerDied","Data":"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613"} Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.252706 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sv5rc" event={"ID":"56df5b0b-d5f6-4167-9f41-c7bd58a661f4","Type":"ContainerDied","Data":"552f0ce1b0a8bed3c45060e79bcf57f0d461cf4ccdcf61acefc2e3104bcb116f"} Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.252734 4909 scope.go:117] "RemoveContainer" containerID="40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.255418 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerStarted","Data":"af2b8315ca367d40073676d9c49360ed1d8b63d081331e1575c71d6bd36b84f3"} Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.266347 4909 scope.go:117] "RemoveContainer" containerID="ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.277679 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-65j2k" podStartSLOduration=2.156882872 podStartE2EDuration="57.277644739s" podCreationTimestamp="2025-11-28 16:12:44 +0000 UTC" firstStartedPulling="2025-11-28 16:12:45.689416105 +0000 UTC m=+148.086100629" lastFinishedPulling="2025-11-28 16:13:40.810177972 +0000 UTC m=+203.206862496" observedRunningTime="2025-11-28 16:13:41.275034059 +0000 UTC m=+203.671718583" watchObservedRunningTime="2025-11-28 16:13:41.277644739 +0000 UTC m=+203.674329263" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.287138 4909 scope.go:117] "RemoveContainer" containerID="7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.305348 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.305376 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khkrc\" (UniqueName: \"kubernetes.io/projected/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-kube-api-access-khkrc\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.309494 4909 scope.go:117] "RemoveContainer" containerID="40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613" Nov 28 16:13:41 crc kubenswrapper[4909]: E1128 16:13:41.309920 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613\": container with ID starting with 40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613 not found: ID does not exist" containerID="40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.309966 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613"} err="failed to get container status \"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613\": rpc error: code = NotFound desc = could not find container \"40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613\": container with ID starting with 40f760c7df08348b34d658105d2ab50ec291ebbbf19910e1fc8f1600d2696613 not found: ID does not exist" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.310004 4909 scope.go:117] "RemoveContainer" containerID="ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14" Nov 28 16:13:41 crc kubenswrapper[4909]: E1128 16:13:41.310213 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14\": container with ID starting with ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14 not found: ID does not exist" containerID="ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.310252 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14"} err="failed to get container status \"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14\": rpc error: code = NotFound desc = could not find container \"ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14\": container with ID starting with ab9b5208821a7407f1e84af8a6cca12d83872ab2a97a76f5bda68b782f1b9e14 not found: ID does not exist" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.310267 4909 scope.go:117] "RemoveContainer" containerID="7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6" Nov 28 16:13:41 crc kubenswrapper[4909]: E1128 16:13:41.310736 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6\": container with ID starting with 7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6 not found: ID does not exist" containerID="7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.310760 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6"} err="failed to get container status \"7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6\": rpc error: code = NotFound desc = could not find container \"7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6\": container with ID starting with 7c861a62bc101acee6d0382e82545215844c075ca319a8846b28bd4af9a4fcd6 not found: ID does not exist" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.321021 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56df5b0b-d5f6-4167-9f41-c7bd58a661f4" (UID: "56df5b0b-d5f6-4167-9f41-c7bd58a661f4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.406524 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56df5b0b-d5f6-4167-9f41-c7bd58a661f4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.628500 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.633134 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sv5rc"] Nov 28 16:13:41 crc kubenswrapper[4909]: I1128 16:13:41.907856 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" path="/var/lib/kubelet/pods/56df5b0b-d5f6-4167-9f41-c7bd58a661f4/volumes" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.181011 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.181742 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.255226 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.341799 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.479801 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.479854 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.568509 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.685895 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.685953 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:44 crc kubenswrapper[4909]: I1128 16:13:44.726080 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:45 crc kubenswrapper[4909]: I1128 16:13:45.286380 4909 generic.go:334] "Generic (PLEG): container finished" podID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerID="e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e" exitCode=0 Nov 28 16:13:45 crc kubenswrapper[4909]: I1128 16:13:45.286454 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerDied","Data":"e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e"} Nov 28 16:13:45 crc kubenswrapper[4909]: I1128 16:13:45.325067 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.294694 4909 generic.go:334] "Generic (PLEG): container finished" podID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerID="93e5848e7bdc15ed3348d24f550fa0eaec03162a4af938288b9e031ed5e39577" exitCode=0 Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.294771 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerDied","Data":"93e5848e7bdc15ed3348d24f550fa0eaec03162a4af938288b9e031ed5e39577"} Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.300624 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerStarted","Data":"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c"} Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.302891 4909 generic.go:334] "Generic (PLEG): container finished" podID="e831d096-86aa-4351-8d67-bdf81194727c" containerID="558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e" exitCode=0 Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.302949 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerDied","Data":"558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e"} Nov 28 16:13:46 crc kubenswrapper[4909]: I1128 16:13:46.345969 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9p7bm" podStartSLOduration=3.140186148 podStartE2EDuration="1m1.345950431s" podCreationTimestamp="2025-11-28 16:12:45 +0000 UTC" firstStartedPulling="2025-11-28 16:12:47.821739548 +0000 UTC m=+150.218424072" lastFinishedPulling="2025-11-28 16:13:46.027503831 +0000 UTC m=+208.424188355" observedRunningTime="2025-11-28 16:13:46.34402497 +0000 UTC m=+208.740709524" watchObservedRunningTime="2025-11-28 16:13:46.345950431 +0000 UTC m=+208.742634955" Nov 28 16:13:47 crc kubenswrapper[4909]: I1128 16:13:47.173918 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"] Nov 28 16:13:47 crc kubenswrapper[4909]: I1128 16:13:47.307180 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cqqvr" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="registry-server" containerID="cri-o://9385d4fb1a573c058f506b40dba6e1eda353a9ff1a7dfe73c90f37f4b4db637b" gracePeriod=2 Nov 28 16:13:47 crc kubenswrapper[4909]: I1128 16:13:47.444871 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:13:47 crc kubenswrapper[4909]: I1128 16:13:47.503083 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:13:48 crc kubenswrapper[4909]: I1128 16:13:48.313425 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerStarted","Data":"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a"} Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.321799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerStarted","Data":"2244cc61ba2f4cc472afdb9854f1103878a7a5b28fd26c748489f704be52aebf"} Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.910851 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.910944 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.911002 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.911551 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:13:49 crc kubenswrapper[4909]: I1128 16:13:49.911650 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a" gracePeriod=600 Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.336151 4909 generic.go:334] "Generic (PLEG): container finished" podID="14032fa5-0a63-49fb-b785-4478ed116450" containerID="9385d4fb1a573c058f506b40dba6e1eda353a9ff1a7dfe73c90f37f4b4db637b" exitCode=0 Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.336221 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerDied","Data":"9385d4fb1a573c058f506b40dba6e1eda353a9ff1a7dfe73c90f37f4b4db637b"} Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.361491 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j2vhf" podStartSLOduration=5.165887876 podStartE2EDuration="1m4.361474035s" podCreationTimestamp="2025-11-28 16:12:46 +0000 UTC" firstStartedPulling="2025-11-28 16:12:47.839823959 +0000 UTC m=+150.236508493" lastFinishedPulling="2025-11-28 16:13:47.035410128 +0000 UTC m=+209.432094652" observedRunningTime="2025-11-28 16:13:50.361159147 +0000 UTC m=+212.757843681" watchObservedRunningTime="2025-11-28 16:13:50.361474035 +0000 UTC m=+212.758158579" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.398112 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rkbfc" podStartSLOduration=6.205256393 podStartE2EDuration="1m7.398087625s" podCreationTimestamp="2025-11-28 16:12:43 +0000 UTC" firstStartedPulling="2025-11-28 
16:12:45.798515727 +0000 UTC m=+148.195200251" lastFinishedPulling="2025-11-28 16:13:46.991346929 +0000 UTC m=+209.388031483" observedRunningTime="2025-11-28 16:13:50.387987175 +0000 UTC m=+212.784671729" watchObservedRunningTime="2025-11-28 16:13:50.398087625 +0000 UTC m=+212.794772149" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.476063 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.656806 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cl95r\" (UniqueName: \"kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r\") pod \"14032fa5-0a63-49fb-b785-4478ed116450\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.656858 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content\") pod \"14032fa5-0a63-49fb-b785-4478ed116450\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.656985 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities\") pod \"14032fa5-0a63-49fb-b785-4478ed116450\" (UID: \"14032fa5-0a63-49fb-b785-4478ed116450\") " Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.658049 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities" (OuterVolumeSpecName: "utilities") pod "14032fa5-0a63-49fb-b785-4478ed116450" (UID: "14032fa5-0a63-49fb-b785-4478ed116450"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.669089 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r" (OuterVolumeSpecName: "kube-api-access-cl95r") pod "14032fa5-0a63-49fb-b785-4478ed116450" (UID: "14032fa5-0a63-49fb-b785-4478ed116450"). InnerVolumeSpecName "kube-api-access-cl95r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.715316 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14032fa5-0a63-49fb-b785-4478ed116450" (UID: "14032fa5-0a63-49fb-b785-4478ed116450"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
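
The pod_startup_latency_tracker entries above follow one arithmetic rule: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling, taken from the monotonic m=+ offsets). That relationship reproduces all three pods logged here; below is a minimal Go sketch checking it for community-operators-rkbfc. The parsing is illustrative, not the kubelet's own code.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values copied from the community-operators-rkbfc entry above.
	created, _ := time.Parse(time.RFC3339Nano, "2025-11-28T16:12:43Z")
	running, _ := time.Parse(time.RFC3339Nano, "2025-11-28T16:13:50.398087625Z")

	// Monotonic offsets (seconds) printed as m=+... in the log.
	firstStartedPulling := 148.195200251
	lastFinishedPulling := 209.388031483

	e2e := running.Sub(created) // podStartE2EDuration: 1m7.398087625s
	pull := time.Duration((lastFinishedPulling - firstStartedPulling) * float64(time.Second))

	fmt.Println("podStartE2EDuration:", e2e)
	fmt.Println("podStartSLOduration:", e2e-pull) // ≈ 6.205256393s, as logged
}
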
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.758746 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.758818 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cl95r\" (UniqueName: \"kubernetes.io/projected/14032fa5-0a63-49fb-b785-4478ed116450-kube-api-access-cl95r\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:50 crc kubenswrapper[4909]: I1128 16:13:50.758847 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14032fa5-0a63-49fb-b785-4478ed116450-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.343750 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a" exitCode=0 Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.343834 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a"} Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.346100 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cqqvr" event={"ID":"14032fa5-0a63-49fb-b785-4478ed116450","Type":"ContainerDied","Data":"80e27b1a0957fa05969543964b9a68ae7d5d81d9b41701fbe4c20e4ec3d602db"} Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.346641 4909 scope.go:117] "RemoveContainer" containerID="9385d4fb1a573c058f506b40dba6e1eda353a9ff1a7dfe73c90f37f4b4db637b" Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.346823 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cqqvr" Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.366236 4909 scope.go:117] "RemoveContainer" containerID="7c40aca941204c74348bb636828be29879af3451e478d9c995f5f95c6d66e6fd" Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.373160 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"] Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.375707 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cqqvr"] Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.404348 4909 scope.go:117] "RemoveContainer" containerID="a09ede08a7250ab92fc300a282131a492edbc1f863c0aca3f7605225ffe0fdce" Nov 28 16:13:51 crc kubenswrapper[4909]: I1128 16:13:51.909298 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14032fa5-0a63-49fb-b785-4478ed116450" path="/var/lib/kubelet/pods/14032fa5-0a63-49fb-b785-4478ed116450/volumes" Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.327929 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.328319 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.370026 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4"} Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.383094 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.438447 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:13:54 crc kubenswrapper[4909]: I1128 16:13:54.749707 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:55 crc kubenswrapper[4909]: I1128 16:13:55.978188 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-65j2k"] Nov 28 16:13:55 crc kubenswrapper[4909]: I1128 16:13:55.978460 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-65j2k" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="registry-server" containerID="cri-o://af2b8315ca367d40073676d9c49360ed1d8b63d081331e1575c71d6bd36b84f3" gracePeriod=2 Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.119553 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.119734 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.204014 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.386210 4909 generic.go:334] "Generic (PLEG): container finished" 
podID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerID="af2b8315ca367d40073676d9c49360ed1d8b63d081331e1575c71d6bd36b84f3" exitCode=0 Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.386299 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerDied","Data":"af2b8315ca367d40073676d9c49360ed1d8b63d081331e1575c71d6bd36b84f3"} Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.386775 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-65j2k" event={"ID":"55ed08a1-95ba-49ec-8c26-2da4a2774920","Type":"ContainerDied","Data":"9df10a3d6c1b760ab3a9abceb27b299b37c7388ea0fc5c1d0544e5e5e6cabe9b"} Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.386805 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9df10a3d6c1b760ab3a9abceb27b299b37c7388ea0fc5c1d0544e5e5e6cabe9b" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.388463 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.431713 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.483468 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.483562 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.531103 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.539045 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content\") pod \"55ed08a1-95ba-49ec-8c26-2da4a2774920\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.539125 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2vmw\" (UniqueName: \"kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw\") pod \"55ed08a1-95ba-49ec-8c26-2da4a2774920\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.539165 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities\") pod \"55ed08a1-95ba-49ec-8c26-2da4a2774920\" (UID: \"55ed08a1-95ba-49ec-8c26-2da4a2774920\") " Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.543329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities" (OuterVolumeSpecName: "utilities") pod "55ed08a1-95ba-49ec-8c26-2da4a2774920" (UID: "55ed08a1-95ba-49ec-8c26-2da4a2774920"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.560479 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw" (OuterVolumeSpecName: "kube-api-access-j2vmw") pod "55ed08a1-95ba-49ec-8c26-2da4a2774920" (UID: "55ed08a1-95ba-49ec-8c26-2da4a2774920"). InnerVolumeSpecName "kube-api-access-j2vmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.592322 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55ed08a1-95ba-49ec-8c26-2da4a2774920" (UID: "55ed08a1-95ba-49ec-8c26-2da4a2774920"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.641522 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.641619 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2vmw\" (UniqueName: \"kubernetes.io/projected/55ed08a1-95ba-49ec-8c26-2da4a2774920-kube-api-access-j2vmw\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:56 crc kubenswrapper[4909]: I1128 16:13:56.641644 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55ed08a1-95ba-49ec-8c26-2da4a2774920-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:57 crc kubenswrapper[4909]: I1128 16:13:57.391305 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-65j2k" Nov 28 16:13:57 crc kubenswrapper[4909]: I1128 16:13:57.428907 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-65j2k"] Nov 28 16:13:57 crc kubenswrapper[4909]: I1128 16:13:57.432223 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-65j2k"] Nov 28 16:13:57 crc kubenswrapper[4909]: I1128 16:13:57.434817 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:13:57 crc kubenswrapper[4909]: I1128 16:13:57.911285 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" path="/var/lib/kubelet/pods/55ed08a1-95ba-49ec-8c26-2da4a2774920/volumes" Nov 28 16:13:58 crc kubenswrapper[4909]: I1128 16:13:58.575173 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:13:59 crc kubenswrapper[4909]: I1128 16:13:59.405880 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j2vhf" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="registry-server" containerID="cri-o://2244cc61ba2f4cc472afdb9854f1103878a7a5b28fd26c748489f704be52aebf" gracePeriod=2 Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.413440 4909 generic.go:334] "Generic (PLEG): container finished" podID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerID="2244cc61ba2f4cc472afdb9854f1103878a7a5b28fd26c748489f704be52aebf" exitCode=0 Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.413543 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerDied","Data":"2244cc61ba2f4cc472afdb9854f1103878a7a5b28fd26c748489f704be52aebf"} Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.519463 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.702961 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm8r6\" (UniqueName: \"kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6\") pod \"f208265d-6857-409d-8bc1-e2e9d87f754b\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.703120 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content\") pod \"f208265d-6857-409d-8bc1-e2e9d87f754b\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.703281 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities\") pod \"f208265d-6857-409d-8bc1-e2e9d87f754b\" (UID: \"f208265d-6857-409d-8bc1-e2e9d87f754b\") " Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.704529 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities" (OuterVolumeSpecName: "utilities") pod "f208265d-6857-409d-8bc1-e2e9d87f754b" (UID: "f208265d-6857-409d-8bc1-e2e9d87f754b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.724522 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6" (OuterVolumeSpecName: "kube-api-access-bm8r6") pod "f208265d-6857-409d-8bc1-e2e9d87f754b" (UID: "f208265d-6857-409d-8bc1-e2e9d87f754b"). InnerVolumeSpecName "kube-api-access-bm8r6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.728688 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f208265d-6857-409d-8bc1-e2e9d87f754b" (UID: "f208265d-6857-409d-8bc1-e2e9d87f754b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.805268 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.805321 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm8r6\" (UniqueName: \"kubernetes.io/projected/f208265d-6857-409d-8bc1-e2e9d87f754b-kube-api-access-bm8r6\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:00 crc kubenswrapper[4909]: I1128 16:14:00.805343 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f208265d-6857-409d-8bc1-e2e9d87f754b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.193887 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" containerID="cri-o://cd3a1f6752ef3375c643e1c150d367d924dbf80e772167f8f59992411baf517a" gracePeriod=15 Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.424732 4909 generic.go:334] "Generic (PLEG): container finished" podID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerID="cd3a1f6752ef3375c643e1c150d367d924dbf80e772167f8f59992411baf517a" exitCode=0 Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.424873 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" event={"ID":"bc0316f8-a276-4bea-a4cb-bf56c011c64a","Type":"ContainerDied","Data":"cd3a1f6752ef3375c643e1c150d367d924dbf80e772167f8f59992411baf517a"} Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.429244 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j2vhf" event={"ID":"f208265d-6857-409d-8bc1-e2e9d87f754b","Type":"ContainerDied","Data":"2c2624940e5ad5678a2356af00919b83f643dae1fa581f978353a11b322b4b29"} Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.429308 4909 scope.go:117] "RemoveContainer" containerID="2244cc61ba2f4cc472afdb9854f1103878a7a5b28fd26c748489f704be52aebf" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.429481 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j2vhf" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.480343 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.482845 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j2vhf"] Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.485914 4909 scope.go:117] "RemoveContainer" containerID="93e5848e7bdc15ed3348d24f550fa0eaec03162a4af938288b9e031ed5e39577" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.525933 4909 scope.go:117] "RemoveContainer" containerID="20f3faf3e642c4325388a582eea4b696b7040d14befbc2c253efc76e895db23f" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.608002 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717604 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717676 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717712 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717754 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717791 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717816 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717873 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717902 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717925 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc 
kubenswrapper[4909]: I1128 16:14:01.717909 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.717971 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718014 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718036 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718066 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718097 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hbkn\" (UniqueName: \"kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn\") pod \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\" (UID: \"bc0316f8-a276-4bea-a4cb-bf56c011c64a\") " Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718329 4909 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718746 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718774 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.718919 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.719628 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.722694 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.723196 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn" (OuterVolumeSpecName: "kube-api-access-9hbkn") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "kube-api-access-9hbkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.723427 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.723951 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.724350 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.724649 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.724840 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.725014 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.725844 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "bc0316f8-a276-4bea-a4cb-bf56c011c64a" (UID: "bc0316f8-a276-4bea-a4cb-bf56c011c64a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819791 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819845 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819874 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819900 4909 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819921 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819945 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819965 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.819984 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.820007 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.820026 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hbkn\" (UniqueName: \"kubernetes.io/projected/bc0316f8-a276-4bea-a4cb-bf56c011c64a-kube-api-access-9hbkn\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.820045 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.820065 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.820085 4909 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc0316f8-a276-4bea-a4cb-bf56c011c64a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:01 crc kubenswrapper[4909]: I1128 16:14:01.915935 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" path="/var/lib/kubelet/pods/f208265d-6857-409d-8bc1-e2e9d87f754b/volumes" Nov 28 16:14:02 crc kubenswrapper[4909]: I1128 16:14:02.439036 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" Nov 28 16:14:02 crc kubenswrapper[4909]: I1128 16:14:02.439050 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-t67vb" event={"ID":"bc0316f8-a276-4bea-a4cb-bf56c011c64a","Type":"ContainerDied","Data":"e1800aa7a3597e2f53907015922ceebe0c484180e0cccc655d9cdb2cc7a5cedd"} Nov 28 16:14:02 crc kubenswrapper[4909]: I1128 16:14:02.439117 4909 scope.go:117] "RemoveContainer" containerID="cd3a1f6752ef3375c643e1c150d367d924dbf80e772167f8f59992411baf517a" Nov 28 16:14:02 crc kubenswrapper[4909]: I1128 16:14:02.468373 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"] Nov 28 16:14:02 crc kubenswrapper[4909]: I1128 16:14:02.474966 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-t67vb"] Nov 28 16:14:03 crc kubenswrapper[4909]: I1128 16:14:03.913562 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" path="/var/lib/kubelet/pods/bc0316f8-a276-4bea-a4cb-bf56c011c64a/volumes" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.840623 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb"] Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841586 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841615 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841637 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841650 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841711 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841725 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841742 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841754 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841770 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841782 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841800 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841811 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841826 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841837 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841853 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841865 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841885 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841897 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841913 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841925 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841939 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841952 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.841973 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.841985 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="extract-utilities" Nov 28 16:14:04 crc kubenswrapper[4909]: E1128 16:14:04.842004 4909 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842017 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="extract-content" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842194 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="56df5b0b-d5f6-4167-9f41-c7bd58a661f4" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842212 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="14032fa5-0a63-49fb-b785-4478ed116450" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842232 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc0316f8-a276-4bea-a4cb-bf56c011c64a" containerName="oauth-openshift" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842256 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="55ed08a1-95ba-49ec-8c26-2da4a2774920" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842272 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f208265d-6857-409d-8bc1-e2e9d87f754b" containerName="registry-server" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.842830 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.846606 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.846690 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.848346 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.848854 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.848917 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.848971 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.849115 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.848970 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.850107 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.852065 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.854115 4909 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.855828 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.869581 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.874565 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb"] Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.879542 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.880697 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.969796 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.969873 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-session\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.969918 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.969959 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-error\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.969998 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970031 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-wnrrm\" (UniqueName: \"kubernetes.io/projected/15c8c4d8-7746-4e38-b771-7ef4addacdcf-kube-api-access-wnrrm\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970063 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-dir\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970115 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-policies\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970147 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970179 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970421 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-login\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970498 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970529 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-service-ca\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:04 crc kubenswrapper[4909]: I1128 16:14:04.970594 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-router-certs\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072191 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-router-certs\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072336 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072397 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-session\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072471 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-error\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072610 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072652 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnrrm\" (UniqueName: \"kubernetes.io/projected/15c8c4d8-7746-4e38-b771-7ef4addacdcf-kube-api-access-wnrrm\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072711 
4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-dir\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072824 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-policies\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072856 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072893 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.072986 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-login\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.073045 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.073081 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-service-ca\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.073801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-dir\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.075052 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-audit-policies\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.075696 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.077724 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.077788 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-service-ca\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.080842 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-error\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.081407 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-router-certs\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.081506 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-login\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.084294 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.088280 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-session\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.089117 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.089201 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.082381 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/15c8c4d8-7746-4e38-b771-7ef4addacdcf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.103350 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnrrm\" (UniqueName: \"kubernetes.io/projected/15c8c4d8-7746-4e38-b771-7ef4addacdcf-kube-api-access-wnrrm\") pod \"oauth-openshift-5fff7d8cf9-j4czb\" (UID: \"15c8c4d8-7746-4e38-b771-7ef4addacdcf\") " pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.159396 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:05 crc kubenswrapper[4909]: I1128 16:14:05.463691 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb"] Nov 28 16:14:06 crc kubenswrapper[4909]: I1128 16:14:06.473460 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" event={"ID":"15c8c4d8-7746-4e38-b771-7ef4addacdcf","Type":"ContainerStarted","Data":"d14d82d248ff150c7c61b43e1c09b6b7313eee6a442244ed740afe362da10f45"} Nov 28 16:14:06 crc kubenswrapper[4909]: I1128 16:14:06.473957 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:06 crc kubenswrapper[4909]: I1128 16:14:06.473989 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" event={"ID":"15c8c4d8-7746-4e38-b771-7ef4addacdcf","Type":"ContainerStarted","Data":"69010e8232e95e3ee73c18bacf7dab9fd183555b9f379cb7c66479e482b039df"} Nov 28 16:14:06 crc kubenswrapper[4909]: I1128 16:14:06.483035 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" Nov 28 16:14:06 crc kubenswrapper[4909]: I1128 16:14:06.535513 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5fff7d8cf9-j4czb" podStartSLOduration=30.535495636 podStartE2EDuration="30.535495636s" podCreationTimestamp="2025-11-28 16:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:14:06.506909642 +0000 UTC m=+228.903594236" watchObservedRunningTime="2025-11-28 16:14:06.535495636 +0000 UTC m=+228.932180160" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.597643 4909 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.600459 4909 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.600487 4909 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.600577 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601045 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7" gracePeriod=15 Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601161 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482" gracePeriod=15 Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601277 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34" gracePeriod=15 Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601359 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601376 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601078 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5" gracePeriod=15 Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601389 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601398 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601408 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601427 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601442 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601451 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601461 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601467 4909 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.601474 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601480 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601219 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d" gracePeriod=15 Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601590 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601605 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601615 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601627 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.601635 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.603828 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.603865 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.603888 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.603919 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.603949 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.605532 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.662894 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708445 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708562 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708729 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708849 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708879 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708937 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.708974 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.709132 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.709203 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.709269 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.709348 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.709428 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810305 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810671 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810767 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810813 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.810841 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: I1128 16:14:11.957739 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:11 crc kubenswrapper[4909]: E1128 16:14:11.990786 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c37c00b946d28 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,LastTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:14:12 crc kubenswrapper[4909]: E1128 16:14:12.430541 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c37c00b946d28 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on 
machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,LastTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.520645 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.522409 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5" exitCode=0 Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.522458 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34" exitCode=0 Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.522480 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482" exitCode=0 Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.522509 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d" exitCode=2 Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.525115 4909 generic.go:334] "Generic (PLEG): container finished" podID="3ff86d88-1ce0-4571-be27-bde40c65a82e" containerID="aedeec87b186fd98fa6513a40b47135d3169f23f135696528fc62c0c802aad81" exitCode=0 Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.525215 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3ff86d88-1ce0-4571-be27-bde40c65a82e","Type":"ContainerDied","Data":"aedeec87b186fd98fa6513a40b47135d3169f23f135696528fc62c0c802aad81"} Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.526447 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.526964 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.528381 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5"} Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.528432 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f4721f168e3895dd1881a280c1a5ed160c6951c4a7c11124aa01f3fa93df8055"} Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.529183 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:12 crc kubenswrapper[4909]: I1128 16:14:12.529631 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.863186 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.863878 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.864033 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939018 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir\") pod \"3ff86d88-1ce0-4571-be27-bde40c65a82e\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939264 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock\") pod \"3ff86d88-1ce0-4571-be27-bde40c65a82e\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939287 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access\") pod \"3ff86d88-1ce0-4571-be27-bde40c65a82e\" (UID: \"3ff86d88-1ce0-4571-be27-bde40c65a82e\") " Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939175 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3ff86d88-1ce0-4571-be27-bde40c65a82e" (UID: "3ff86d88-1ce0-4571-be27-bde40c65a82e"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939345 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock" (OuterVolumeSpecName: "var-lock") pod "3ff86d88-1ce0-4571-be27-bde40c65a82e" (UID: "3ff86d88-1ce0-4571-be27-bde40c65a82e"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939525 4909 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.939538 4909 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ff86d88-1ce0-4571-be27-bde40c65a82e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:13 crc kubenswrapper[4909]: I1128 16:14:13.944414 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3ff86d88-1ce0-4571-be27-bde40c65a82e" (UID: "3ff86d88-1ce0-4571-be27-bde40c65a82e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.040901 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ff86d88-1ce0-4571-be27-bde40c65a82e-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.559291 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3ff86d88-1ce0-4571-be27-bde40c65a82e","Type":"ContainerDied","Data":"6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d"} Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.559359 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d1ed1035a85aaa1d85ef065879893963080680df072779da4a201d7c286b07d" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.559441 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.576511 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.581201 4909 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7" exitCode=0 Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.601634 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.602137 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.607035 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.608744 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.609197 4909 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.609651 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.610125 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.649861 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.649970 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") 
pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.649993 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650017 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650045 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650162 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650449 4909 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650473 4909 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:14 crc kubenswrapper[4909]: I1128 16:14:14.650490 4909 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.594248 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.595283 4909 scope.go:117] "RemoveContainer" containerID="53a5c9679a2c94c1297318d2e554c2445efbfab9cf72d25e48d753d66fa217d5" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.595377 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.622462 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.623843 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.624597 4909 scope.go:117] "RemoveContainer" containerID="da70cc63e1edf58e6d6fed49fc9266d54fabba5f7ce3216d910eb18aebc45c34" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.624576 4909 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.650006 4909 scope.go:117] "RemoveContainer" containerID="76ac0606fbf94f3746fb7ffa549a3cb684d40ee72224dd36c96bb47db493b482" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.671570 4909 scope.go:117] "RemoveContainer" containerID="1d5716d50bbed7c047029bbd7964431e97f78c6516505f57fe76d2a1548a665d" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.692282 4909 scope.go:117] "RemoveContainer" containerID="107b559f7d62f02041439f8c727185e9a819cd655afa0898e818d8618cc54cc7" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.722197 4909 scope.go:117] "RemoveContainer" containerID="0700ae65eb592e5b2110d37bb15dbb1dbdd228986de015ab5dc8fe13672032ef" Nov 28 16:14:15 crc kubenswrapper[4909]: I1128 16:14:15.911717 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 16:14:17 crc kubenswrapper[4909]: I1128 16:14:17.904161 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:17 crc kubenswrapper[4909]: I1128 16:14:17.905299 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.051877 4909 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.053329 
4909 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.053814 4909 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.054182 4909 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.054537 4909 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:21 crc kubenswrapper[4909]: I1128 16:14:21.054571 4909 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.054928 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="200ms" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.255808 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="400ms" Nov 28 16:14:21 crc kubenswrapper[4909]: E1128 16:14:21.657054 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="800ms" Nov 28 16:14:22 crc kubenswrapper[4909]: E1128 16:14:22.432128 4909 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c37c00b946d28 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,LastTimestamp:2025-11-28 16:14:11.989916968 +0000 UTC m=+234.386601492,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:14:22 crc kubenswrapper[4909]: 
E1128 16:14:22.458599 4909 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="1.6s" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.901376 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.902380 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.902872 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.918564 4909 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.918602 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:22 crc kubenswrapper[4909]: E1128 16:14:22.919035 4909 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:22 crc kubenswrapper[4909]: I1128 16:14:22.919519 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:22 crc kubenswrapper[4909]: W1128 16:14:22.942871 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-328c3215813b6b9accb6107f6fbc997188569a6943c8206b22da2739f93dde68 WatchSource:0}: Error finding container 328c3215813b6b9accb6107f6fbc997188569a6943c8206b22da2739f93dde68: Status 404 returned error can't find the container with id 328c3215813b6b9accb6107f6fbc997188569a6943c8206b22da2739f93dde68 Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.649700 4909 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="b9eee9eb13bc9e29d84253a65addd008b41a932064243408de6c77c455e93356" exitCode=0 Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.649803 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"b9eee9eb13bc9e29d84253a65addd008b41a932064243408de6c77c455e93356"} Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.650062 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"328c3215813b6b9accb6107f6fbc997188569a6943c8206b22da2739f93dde68"} Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.650359 4909 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.650374 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.650778 4909 status_manager.go:851] "Failed to get status for pod" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:23 crc kubenswrapper[4909]: E1128 16:14:23.650792 4909 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:23 crc kubenswrapper[4909]: I1128 16:14:23.651113 4909 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Nov 28 16:14:24 crc kubenswrapper[4909]: I1128 16:14:24.666757 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"75778dfc3c16a623203d98c709a64813695c2ec8290bb621d1f39353abddd98e"} Nov 28 16:14:24 crc kubenswrapper[4909]: I1128 16:14:24.667151 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5dfccc1431b851c81823bb19f807e1a008aec41ee4da9eb0e601ac323e3e1003"} Nov 28 16:14:24 crc kubenswrapper[4909]: I1128 16:14:24.667165 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5217f944554d13e119a2b0dce78cb631bd12c988b95f77edafba8e3021f3b046"} Nov 28 16:14:24 crc kubenswrapper[4909]: I1128 16:14:24.667176 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"30edc2c55f30dd1e453b9e9baf4a31db329044a140c24948a4e4b2040005167e"} Nov 28 16:14:25 crc kubenswrapper[4909]: I1128 16:14:25.674642 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c30d54a65a41728b3f528d9dcc8f94ac9bc0218e056afe631cffd753d9e10f45"} Nov 28 16:14:25 crc kubenswrapper[4909]: I1128 16:14:25.674866 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:25 crc kubenswrapper[4909]: I1128 16:14:25.674971 4909 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:25 crc kubenswrapper[4909]: I1128 16:14:25.674996 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.570312 4909 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.570779 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.690447 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.690512 4909 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb" exitCode=1 Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.690547 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb"} Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.691106 4909 scope.go:117] "RemoveContainer" containerID="b41cf0481323080f4056d35add13c2254c6ffbe432b1651555067d3fc9d163eb" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.920038 4909 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.920096 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:27 crc kubenswrapper[4909]: I1128 16:14:27.926370 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:28 crc kubenswrapper[4909]: I1128 16:14:28.702381 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 16:14:28 crc kubenswrapper[4909]: I1128 16:14:28.702474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"070a356d34f85e1a8e2d5f1067b105dc8a442ec37400e7805a0fc0934f0a817a"} Nov 28 16:14:30 crc kubenswrapper[4909]: I1128 16:14:30.686612 4909 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:30 crc kubenswrapper[4909]: I1128 16:14:30.712510 4909 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:30 crc kubenswrapper[4909]: I1128 16:14:30.712533 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:30 crc kubenswrapper[4909]: I1128 16:14:30.729897 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:30 crc kubenswrapper[4909]: I1128 16:14:30.743449 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="127b1949-7797-4307-8910-42b0787edd30" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.522643 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.526981 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.716408 4909 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.716436 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dda7db1a-d603-4121-8cd6-e72c9ae02961" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.716525 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:14:31 crc kubenswrapper[4909]: I1128 16:14:31.719820 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="127b1949-7797-4307-8910-42b0787edd30" Nov 28 16:14:37 crc kubenswrapper[4909]: I1128 16:14:37.550174 4909 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 16:14:37 crc kubenswrapper[4909]: I1128 16:14:37.577326 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:14:39 crc kubenswrapper[4909]: I1128 16:14:39.964226 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 16:14:40 crc kubenswrapper[4909]: I1128 16:14:40.394566 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 16:14:41 crc kubenswrapper[4909]: I1128 16:14:41.068142 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 16:14:41 crc kubenswrapper[4909]: I1128 16:14:41.820805 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 16:14:41 crc kubenswrapper[4909]: I1128 16:14:41.953021 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.253156 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.419311 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.710371 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.823922 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.838300 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 16:14:42 crc kubenswrapper[4909]: I1128 16:14:42.865804 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 16:14:43 crc kubenswrapper[4909]: I1128 16:14:43.363240 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 16:14:43 crc kubenswrapper[4909]: I1128 16:14:43.468006 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 16:14:43 crc kubenswrapper[4909]: I1128 16:14:43.644599 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 16:14:43 crc kubenswrapper[4909]: I1128 16:14:43.799824 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.112436 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.339106 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.387874 4909 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.498152 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.520419 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.529632 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.572679 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.589253 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.631813 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.644491 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.856225 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.916193 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 16:14:44 crc kubenswrapper[4909]: I1128 16:14:44.991401 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.179671 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.277746 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.454331 4909 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.457305 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=34.45728934 podStartE2EDuration="34.45728934s" podCreationTimestamp="2025-11-28 16:14:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:14:30.71343554 +0000 UTC m=+253.110120064" watchObservedRunningTime="2025-11-28 16:14:45.45728934 +0000 UTC m=+267.853973874" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.459232 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.459274 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.467220 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 
16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.485236 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.485210949 podStartE2EDuration="15.485210949s" podCreationTimestamp="2025-11-28 16:14:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:14:45.48443183 +0000 UTC m=+267.881116404" watchObservedRunningTime="2025-11-28 16:14:45.485210949 +0000 UTC m=+267.881895513" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.534523 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.585269 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.600844 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.735070 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.736706 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.790251 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.871238 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.883269 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.903968 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.944066 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 16:14:45 crc kubenswrapper[4909]: I1128 16:14:45.955072 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.147915 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.155518 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.296457 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.403483 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.480771 4909 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.554073 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.559002 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.827881 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 16:14:46 crc kubenswrapper[4909]: I1128 16:14:46.913864 4909 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.061587 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.239165 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.397259 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.428228 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.444963 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.456836 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.574515 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.646064 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.861427 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 16:14:47 crc kubenswrapper[4909]: I1128 16:14:47.964264 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.128071 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.262639 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.265335 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.273490 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.427974 4909 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.446129 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.467316 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.496567 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.511272 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.554312 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.609741 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.665995 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.709028 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.807910 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.832815 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.836878 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.862218 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.863572 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.907816 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.908641 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 16:14:48 crc kubenswrapper[4909]: I1128 16:14:48.965122 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.052212 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.066454 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.156869 4909 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.177554 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.185392 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.186101 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.188173 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.210377 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.248282 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.373803 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.402034 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.414868 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.450329 4909 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.453384 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.471648 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.513220 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.543552 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.556488 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.558758 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.683257 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.703755 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.704621 4909 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.710841 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.760893 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.858741 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.911886 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.917881 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 16:14:49 crc kubenswrapper[4909]: I1128 16:14:49.978228 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.063094 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.071823 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.081400 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.125557 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.148501 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.271067 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.373215 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.398502 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.433251 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.572776 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.630604 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.679398 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.694850 4909 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.705925 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.718435 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.734975 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.779245 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.796864 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.817746 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.854447 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 16:14:50 crc kubenswrapper[4909]: I1128 16:14:50.930476 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.132170 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.149455 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.232925 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.303159 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.324199 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.348888 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.349968 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.391677 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.496844 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.568302 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 16:14:51 crc kubenswrapper[4909]: 
I1128 16:14:51.672924 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.727464 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.795463 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.842511 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.869971 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.871871 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.908041 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.920096 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 16:14:51 crc kubenswrapper[4909]: I1128 16:14:51.968758 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.175802 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.182592 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.268740 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.337351 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.379940 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.388017 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.401631 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.414407 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.456306 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.466250 4909 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.491022 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.566906 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.655049 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.682204 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.722569 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.731767 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.750140 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.777511 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.791400 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.826178 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.937170 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 16:14:52 crc kubenswrapper[4909]: I1128 16:14:52.996098 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.039121 4909 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.039384 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5" gracePeriod=5 Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.100060 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.103452 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.177917 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.259816 4909 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.277323 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.398481 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.412869 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.470484 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.480861 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.580856 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.624945 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.760123 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.845136 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.845359 4909 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.885946 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 16:14:53 crc kubenswrapper[4909]: I1128 16:14:53.998520 4909 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.091136 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.136282 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.213304 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.310639 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.452042 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.462523 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.654811 4909 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.802795 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.983702 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 16:14:54 crc kubenswrapper[4909]: I1128 16:14:54.999867 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.004854 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.082233 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.129101 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.164068 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.291707 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.316173 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.337713 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.572695 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.637729 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.697961 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.723308 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.752971 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.791939 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.878546 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 16:14:55 crc kubenswrapper[4909]: I1128 16:14:55.885041 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 
16:14:56.086300 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.160549 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.182463 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.214222 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.284848 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.386024 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.420229 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.443549 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.545834 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.614621 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.759585 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.826094 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.844874 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.895200 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 28 16:14:56 crc kubenswrapper[4909]: I1128 16:14:56.997545 4909 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.032455 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.067366 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.083360 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.200486 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.204717 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.239534 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.405194 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.472581 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.569756 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.608754 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.652735 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.697122 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 28 16:14:57 crc kubenswrapper[4909]: I1128 16:14:57.722562 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.067450 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.180967 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.283039 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.393938 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.413076 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.613168 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.613228 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.643706 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689269 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689383 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689388 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689456 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689511 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689555 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689606 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689614 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.689687 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.690034 4909 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.690076 4909 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.690096 4909 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.690114 4909 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.697898 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.791630 4909 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.875524 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.890513 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.890559 4909 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5" exitCode=137
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.890596 4909 scope.go:117] "RemoveContainer" containerID="310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.890753 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.905189 4909 scope.go:117] "RemoveContainer" containerID="310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5"
Nov 28 16:14:58 crc kubenswrapper[4909]: E1128 16:14:58.906529 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5\": container with ID starting with 310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5 not found: ID does not exist" containerID="310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5"
Nov 28 16:14:58 crc kubenswrapper[4909]: I1128 16:14:58.906585 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5"} err="failed to get container status \"310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5\": rpc error: code = NotFound desc = could not find container \"310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5\": container with ID starting with 310e4ea6d3a4496f951b82ec5c83991cc5297cff6267c00123b8cc4daa492da5 not found: ID does not exist"
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.056056 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.877268 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.912439 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.912817 4909 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.927556 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.927615 4909 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="b80135ef-ef18-417a-ae03-39765a94f7a2"
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.931535 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 16:14:59 crc kubenswrapper[4909]: I1128 16:14:59.931564 4909 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="b80135ef-ef18-417a-ae03-39765a94f7a2"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166131 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"]
Nov 28 16:15:00 crc kubenswrapper[4909]: E1128 16:15:00.166350 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166363 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 16:15:00 crc kubenswrapper[4909]: E1128 16:15:00.166375 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" containerName="installer"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166381 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" containerName="installer"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166456 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166468 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ff86d88-1ce0-4571-be27-bde40c65a82e" containerName="installer"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.166822 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.171079 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.171201 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.178825 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"]
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.310759 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.310836 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwpdp\" (UniqueName: \"kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.310938 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.413222 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.413280 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwpdp\" (UniqueName: \"kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.413325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.414563 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.418324 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.439835 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwpdp\" (UniqueName: \"kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp\") pod \"collect-profiles-29405775-nv5g7\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.491710 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.727856 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"]
Nov 28 16:15:00 crc kubenswrapper[4909]: W1128 16:15:00.731420 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod897228c9_f7a7_4b65_9dcc_207146ee92f6.slice/crio-bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184 WatchSource:0}: Error finding container bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184: Status 404 returned error can't find the container with id bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.904126 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7" event={"ID":"897228c9-f7a7-4b65-9dcc-207146ee92f6","Type":"ContainerStarted","Data":"bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184"}
Nov 28 16:15:00 crc kubenswrapper[4909]: I1128 16:15:00.960087 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 28 16:15:01 crc kubenswrapper[4909]: I1128 16:15:01.911998 4909 generic.go:334] "Generic (PLEG): container finished" podID="897228c9-f7a7-4b65-9dcc-207146ee92f6" containerID="5ba11df5c984ab61981376feae17e538d7f828e2e0c91df70a4373a6bd428ef6" exitCode=0
Nov 28 16:15:01 crc kubenswrapper[4909]: I1128 16:15:01.912054 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7" event={"ID":"897228c9-f7a7-4b65-9dcc-207146ee92f6","Type":"ContainerDied","Data":"5ba11df5c984ab61981376feae17e538d7f828e2e0c91df70a4373a6bd428ef6"}
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.137992 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.247808 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume\") pod \"897228c9-f7a7-4b65-9dcc-207146ee92f6\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") "
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.247963 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume\") pod \"897228c9-f7a7-4b65-9dcc-207146ee92f6\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") "
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.248018 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwpdp\" (UniqueName: \"kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp\") pod \"897228c9-f7a7-4b65-9dcc-207146ee92f6\" (UID: \"897228c9-f7a7-4b65-9dcc-207146ee92f6\") "
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.248522 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "897228c9-f7a7-4b65-9dcc-207146ee92f6" (UID: "897228c9-f7a7-4b65-9dcc-207146ee92f6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.253555 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "897228c9-f7a7-4b65-9dcc-207146ee92f6" (UID: "897228c9-f7a7-4b65-9dcc-207146ee92f6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.254249 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp" (OuterVolumeSpecName: "kube-api-access-cwpdp") pod "897228c9-f7a7-4b65-9dcc-207146ee92f6" (UID: "897228c9-f7a7-4b65-9dcc-207146ee92f6"). InnerVolumeSpecName "kube-api-access-cwpdp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.349061 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/897228c9-f7a7-4b65-9dcc-207146ee92f6-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.349101 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwpdp\" (UniqueName: \"kubernetes.io/projected/897228c9-f7a7-4b65-9dcc-207146ee92f6-kube-api-access-cwpdp\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.349114 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/897228c9-f7a7-4b65-9dcc-207146ee92f6-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.924969 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7" event={"ID":"897228c9-f7a7-4b65-9dcc-207146ee92f6","Type":"ContainerDied","Data":"bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184"}
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.925449 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbd951000bcd35eb287a379a163d82ea535646b390beff7fb8d3cbabfc566184"
Nov 28 16:15:03 crc kubenswrapper[4909]: I1128 16:15:03.925009 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"
Nov 28 16:15:14 crc kubenswrapper[4909]: I1128 16:15:14.991169 4909 generic.go:334] "Generic (PLEG): container finished" podID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerID="03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b" exitCode=0
Nov 28 16:15:14 crc kubenswrapper[4909]: I1128 16:15:14.991300 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerDied","Data":"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b"}
Nov 28 16:15:14 crc kubenswrapper[4909]: I1128 16:15:14.992365 4909 scope.go:117] "RemoveContainer" containerID="03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b"
Nov 28 16:15:16 crc kubenswrapper[4909]: I1128 16:15:16.000382 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerStarted","Data":"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2"}
Nov 28 16:15:16 crc kubenswrapper[4909]: I1128 16:15:16.001238 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:15:16 crc kubenswrapper[4909]: I1128 16:15:16.006043 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw"
Nov 28 16:15:17 crc kubenswrapper[4909]: I1128 16:15:17.751500 4909 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.071696 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"]
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.072183 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager" containerID="cri-o://764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4" gracePeriod=30
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.160992 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"]
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.162067 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" podUID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" containerName="route-controller-manager" containerID="cri-o://783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f" gracePeriod=30
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.397975 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh"
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.452792 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510347 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc6ws\" (UniqueName: \"kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws\") pod \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510388 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config\") pod \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510420 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert\") pod \"0d41d267-53c8-4859-9c63-737eda42098f\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510445 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca\") pod \"0d41d267-53c8-4859-9c63-737eda42098f\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510467 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca\") pod \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510496 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config\") pod \"0d41d267-53c8-4859-9c63-737eda42098f\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510517 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles\") pod \"0d41d267-53c8-4859-9c63-737eda42098f\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510552 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert\") pod \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\" (UID: \"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.510572 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7snv\" (UniqueName: \"kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv\") pod \"0d41d267-53c8-4859-9c63-737eda42098f\" (UID: \"0d41d267-53c8-4859-9c63-737eda42098f\") "
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.511219 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca" (OuterVolumeSpecName: "client-ca") pod "0d41d267-53c8-4859-9c63-737eda42098f" (UID: "0d41d267-53c8-4859-9c63-737eda42098f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.511634 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca" (OuterVolumeSpecName: "client-ca") pod "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" (UID: "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.511753 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "0d41d267-53c8-4859-9c63-737eda42098f" (UID: "0d41d267-53c8-4859-9c63-737eda42098f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.511875 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config" (OuterVolumeSpecName: "config") pod "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" (UID: "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.512037 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config" (OuterVolumeSpecName: "config") pod "0d41d267-53c8-4859-9c63-737eda42098f" (UID: "0d41d267-53c8-4859-9c63-737eda42098f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.515902 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws" (OuterVolumeSpecName: "kube-api-access-cc6ws") pod "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" (UID: "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8"). InnerVolumeSpecName "kube-api-access-cc6ws". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.516025 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" (UID: "fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.516100 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv" (OuterVolumeSpecName: "kube-api-access-h7snv") pod "0d41d267-53c8-4859-9c63-737eda42098f" (UID: "0d41d267-53c8-4859-9c63-737eda42098f"). InnerVolumeSpecName "kube-api-access-h7snv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.517297 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0d41d267-53c8-4859-9c63-737eda42098f" (UID: "0d41d267-53c8-4859-9c63-737eda42098f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612407 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612438 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612448 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7snv\" (UniqueName: \"kubernetes.io/projected/0d41d267-53c8-4859-9c63-737eda42098f-kube-api-access-h7snv\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612460 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc6ws\" (UniqueName: \"kubernetes.io/projected/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-kube-api-access-cc6ws\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612492 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612503 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d41d267-53c8-4859-9c63-737eda42098f-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612517 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612527 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:22 crc kubenswrapper[4909]: I1128 16:15:22.612536 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d41d267-53c8-4859-9c63-737eda42098f-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.050194 4909 generic.go:334] "Generic (PLEG): container finished" podID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" containerID="783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f" exitCode=0
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.050310 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.050302 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" event={"ID":"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8","Type":"ContainerDied","Data":"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"}
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.050405 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds" event={"ID":"fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8","Type":"ContainerDied","Data":"1d1dfd636d6861489810ce5322d1f7650340ac9232fa721f08e3c6621da33a6e"}
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.050448 4909 scope.go:117] "RemoveContainer" containerID="783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.053550 4909 generic.go:334] "Generic (PLEG): container finished" podID="0d41d267-53c8-4859-9c63-737eda42098f" containerID="764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4" exitCode=0
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.053596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" event={"ID":"0d41d267-53c8-4859-9c63-737eda42098f","Type":"ContainerDied","Data":"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"}
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.053639 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh" event={"ID":"0d41d267-53c8-4859-9c63-737eda42098f","Type":"ContainerDied","Data":"d2737bcf001f2cf5fc486550257495434306549a23f72ef97baaea42a71d8639"}
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.053777 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5x9zh"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.078296 4909 scope.go:117] "RemoveContainer" containerID="783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.080075 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f\": container with ID starting with 783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f not found: ID does not exist" containerID="783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.080121 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f"} err="failed to get container status \"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f\": rpc error: code = NotFound desc = could not find container \"783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f\": container with ID starting with 783a0a1719c49a100ce7b1793012170f1dbe4583f48051467574be0089b49c5f not found: ID does not exist"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.080153 4909 scope.go:117] "RemoveContainer" containerID="764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.097442 4909 scope.go:117] "RemoveContainer" containerID="764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.098009 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4\": container with ID starting with 764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4 not found: ID does not exist" containerID="764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.098041 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4"} err="failed to get container status \"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4\": rpc error: code = NotFound desc = could not find container \"764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4\": container with ID starting with 764eb19d478be4054e7b450cd969736a4743b863f72316d5f20fc53365fe16f4 not found: ID does not exist"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.098073 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.103887 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c7bds"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.110140 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.116883 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5x9zh"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.467256 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"]
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.468060 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468078 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.468103 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" containerName="route-controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468112 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" containerName="route-controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.468133 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="897228c9-f7a7-4b65-9dcc-207146ee92f6" containerName="collect-profiles"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468143 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="897228c9-f7a7-4b65-9dcc-207146ee92f6" containerName="collect-profiles"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468251 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" containerName="route-controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468266 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="897228c9-f7a7-4b65-9dcc-207146ee92f6" containerName="collect-profiles"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468280 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d41d267-53c8-4859-9c63-737eda42098f" containerName="controller-manager"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.468753 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: W1128 16:15:23.470520 4909 reflector.go:561] object-"openshift-controller-manager"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.470564 4909 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:15:23 crc kubenswrapper[4909]: W1128 16:15:23.470522 4909 reflector.go:561] object-"openshift-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.470612 4909 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:15:23 crc kubenswrapper[4909]: W1128 16:15:23.470706 4909 reflector.go:561] object-"openshift-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Nov 28 16:15:23 crc kubenswrapper[4909]: W1128 16:15:23.470752 4909 reflector.go:561] object-"openshift-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.470797 4909 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:15:23 crc kubenswrapper[4909]: W1128 16:15:23.470715 4909 reflector.go:561] object-"openshift-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.470828 4909 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:15:23 crc kubenswrapper[4909]: E1128 16:15:23.470752 4909 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.471617 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.478353 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.480092 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.481950 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.482528 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.484868 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.485265 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.485567 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.485775 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.485924 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.503627 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.518933 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"]
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524565 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524613 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkjn5\" (UniqueName: \"kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524673 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524698 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524914 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524943 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.524959 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.525002 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdxrn\" (UniqueName: \"kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629033 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkjn5\" (UniqueName: \"kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629190 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629236 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629317 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629351 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629385 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629435 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdxrn\" (UniqueName: \"kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629482 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.629527 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.631588 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.633346 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.633610 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.637982 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.659596 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdxrn\" (UniqueName: \"kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn\") pod \"route-controller-manager-8587bbf9b-gtlw7\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.794095 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.915147 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d41d267-53c8-4859-9c63-737eda42098f" path="/var/lib/kubelet/pods/0d41d267-53c8-4859-9c63-737eda42098f/volumes"
Nov 28 16:15:23 crc kubenswrapper[4909]: I1128 16:15:23.916295 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8" path="/var/lib/kubelet/pods/fe88e922-b9a9-4c6c-8b36-5cd5eaf1b7b8/volumes"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.031431 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"]
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.060562 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" event={"ID":"da514bae-771e-4a15-87e6-2f1f7b81f125","Type":"ContainerStarted","Data":"68dc700b310fd698c744682338b28c62e769aa40d61d9958b889423b03c11d7e"}
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.463145 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.473524 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.595184 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 16:15:24 crc kubenswrapper[4909]: E1128 16:15:24.632516 4909 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 28 16:15:24 crc kubenswrapper[4909]: E1128 16:15:24.632641 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca podName:51dc9bf8-a5fb-483d-b9d7-2593847029ef nodeName:}" failed. No retries permitted until 2025-11-28 16:15:25.132611927 +0000 UTC m=+307.529296491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca") pod "controller-manager-7ff9c64758-x28sn" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 16:15:24 crc kubenswrapper[4909]: E1128 16:15:24.632529 4909 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 28 16:15:24 crc kubenswrapper[4909]: E1128 16:15:24.632747 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert podName:51dc9bf8-a5fb-483d-b9d7-2593847029ef nodeName:}" failed. No retries permitted until 2025-11-28 16:15:25.13272825 +0000 UTC m=+307.529412814 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert") pod "controller-manager-7ff9c64758-x28sn" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef") : failed to sync secret cache: timed out waiting for the condition
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.658343 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.745603 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.764258 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 16:15:24 crc kubenswrapper[4909]: I1128 16:15:24.765495 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkjn5\" (UniqueName: \"kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn"
Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.069078 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" event={"ID":"da514bae-771e-4a15-87e6-2f1f7b81f125","Type":"ContainerStarted","Data":"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f"}
Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.069321 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.076233 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"
Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.103297 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" podStartSLOduration=2.10327694 podStartE2EDuration="2.10327694s" podCreationTimestamp="2025-11-28 16:15:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:25.087075016 +0000 UTC m=+307.483759550" watchObservedRunningTime="2025-11-28 16:15:25.10327694 +0000 UTC m=+307.499961454"
Nov 28
16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.152865 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.152965 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.153772 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.170229 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") pod \"controller-manager-7ff9c64758-x28sn\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.282766 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:25 crc kubenswrapper[4909]: I1128 16:15:25.515385 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"] Nov 28 16:15:25 crc kubenswrapper[4909]: W1128 16:15:25.518492 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51dc9bf8_a5fb_483d_b9d7_2593847029ef.slice/crio-8076f37fed4234f650c43a90064a0e228b79209ba7181389ec754fe41e18dec3 WatchSource:0}: Error finding container 8076f37fed4234f650c43a90064a0e228b79209ba7181389ec754fe41e18dec3: Status 404 returned error can't find the container with id 8076f37fed4234f650c43a90064a0e228b79209ba7181389ec754fe41e18dec3 Nov 28 16:15:26 crc kubenswrapper[4909]: I1128 16:15:26.074260 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" event={"ID":"51dc9bf8-a5fb-483d-b9d7-2593847029ef","Type":"ContainerStarted","Data":"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28"} Nov 28 16:15:26 crc kubenswrapper[4909]: I1128 16:15:26.074304 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" event={"ID":"51dc9bf8-a5fb-483d-b9d7-2593847029ef","Type":"ContainerStarted","Data":"8076f37fed4234f650c43a90064a0e228b79209ba7181389ec754fe41e18dec3"} Nov 28 16:15:26 crc kubenswrapper[4909]: I1128 16:15:26.089395 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" podStartSLOduration=3.089377547 podStartE2EDuration="3.089377547s" 
podCreationTimestamp="2025-11-28 16:15:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:26.088109156 +0000 UTC m=+308.484793680" watchObservedRunningTime="2025-11-28 16:15:26.089377547 +0000 UTC m=+308.486062071" Nov 28 16:15:27 crc kubenswrapper[4909]: I1128 16:15:27.083512 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:27 crc kubenswrapper[4909]: I1128 16:15:27.092046 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:15:42 crc kubenswrapper[4909]: I1128 16:15:42.094103 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"] Nov 28 16:15:42 crc kubenswrapper[4909]: I1128 16:15:42.095077 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" podUID="da514bae-771e-4a15-87e6-2f1f7b81f125" containerName="route-controller-manager" containerID="cri-o://3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f" gracePeriod=30 Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.103456 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.130951 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn"] Nov 28 16:15:43 crc kubenswrapper[4909]: E1128 16:15:43.131210 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da514bae-771e-4a15-87e6-2f1f7b81f125" containerName="route-controller-manager" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.131229 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="da514bae-771e-4a15-87e6-2f1f7b81f125" containerName="route-controller-manager" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.131355 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="da514bae-771e-4a15-87e6-2f1f7b81f125" containerName="route-controller-manager" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.131811 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.151450 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn"] Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.174813 4909 generic.go:334] "Generic (PLEG): container finished" podID="da514bae-771e-4a15-87e6-2f1f7b81f125" containerID="3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f" exitCode=0 Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.174857 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" event={"ID":"da514bae-771e-4a15-87e6-2f1f7b81f125","Type":"ContainerDied","Data":"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f"} Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.174891 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" event={"ID":"da514bae-771e-4a15-87e6-2f1f7b81f125","Type":"ContainerDied","Data":"68dc700b310fd698c744682338b28c62e769aa40d61d9958b889423b03c11d7e"} Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.174911 4909 scope.go:117] "RemoveContainer" containerID="3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.175049 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.189536 4909 scope.go:117] "RemoveContainer" containerID="3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f" Nov 28 16:15:43 crc kubenswrapper[4909]: E1128 16:15:43.189885 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f\": container with ID starting with 3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f not found: ID does not exist" containerID="3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.189922 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f"} err="failed to get container status \"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f\": rpc error: code = NotFound desc = could not find container \"3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f\": container with ID starting with 3238e07306a47a28ab5e4f23ba6643d39048781b8c8a214024e48c809c3f708f not found: ID does not exist" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.207636 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca\") pod \"da514bae-771e-4a15-87e6-2f1f7b81f125\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.207752 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdxrn\" (UniqueName: \"kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn\") pod 
\"da514bae-771e-4a15-87e6-2f1f7b81f125\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.207923 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config\") pod \"da514bae-771e-4a15-87e6-2f1f7b81f125\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.208527 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config" (OuterVolumeSpecName: "config") pod "da514bae-771e-4a15-87e6-2f1f7b81f125" (UID: "da514bae-771e-4a15-87e6-2f1f7b81f125"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.208837 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert\") pod \"da514bae-771e-4a15-87e6-2f1f7b81f125\" (UID: \"da514bae-771e-4a15-87e6-2f1f7b81f125\") " Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209051 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca" (OuterVolumeSpecName: "client-ca") pod "da514bae-771e-4a15-87e6-2f1f7b81f125" (UID: "da514bae-771e-4a15-87e6-2f1f7b81f125"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209446 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-config\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209539 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aac0295b-0605-4ffc-9148-135858a66a25-serving-cert\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209598 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-client-ca\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209687 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz9q5\" (UniqueName: \"kubernetes.io/projected/aac0295b-0605-4ffc-9148-135858a66a25-kube-api-access-lz9q5\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209798 4909 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.209812 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da514bae-771e-4a15-87e6-2f1f7b81f125-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.213283 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn" (OuterVolumeSpecName: "kube-api-access-wdxrn") pod "da514bae-771e-4a15-87e6-2f1f7b81f125" (UID: "da514bae-771e-4a15-87e6-2f1f7b81f125"). InnerVolumeSpecName "kube-api-access-wdxrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.213377 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "da514bae-771e-4a15-87e6-2f1f7b81f125" (UID: "da514bae-771e-4a15-87e6-2f1f7b81f125"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.310832 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-config\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.310896 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aac0295b-0605-4ffc-9148-135858a66a25-serving-cert\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.310920 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-client-ca\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.310958 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9q5\" (UniqueName: \"kubernetes.io/projected/aac0295b-0605-4ffc-9148-135858a66a25-kube-api-access-lz9q5\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.311058 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da514bae-771e-4a15-87e6-2f1f7b81f125-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.311073 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdxrn\" (UniqueName: \"kubernetes.io/projected/da514bae-771e-4a15-87e6-2f1f7b81f125-kube-api-access-wdxrn\") on node \"crc\" 
DevicePath \"\"" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.312269 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-client-ca\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.312633 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac0295b-0605-4ffc-9148-135858a66a25-config\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.317461 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aac0295b-0605-4ffc-9148-135858a66a25-serving-cert\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.335557 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz9q5\" (UniqueName: \"kubernetes.io/projected/aac0295b-0605-4ffc-9148-135858a66a25-kube-api-access-lz9q5\") pod \"route-controller-manager-6985b54c84-s5tpn\" (UID: \"aac0295b-0605-4ffc-9148-135858a66a25\") " pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.461135 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.533791 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"] Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.538111 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8587bbf9b-gtlw7"] Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.884625 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn"] Nov 28 16:15:43 crc kubenswrapper[4909]: W1128 16:15:43.893087 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaac0295b_0605_4ffc_9148_135858a66a25.slice/crio-4435cca2338351a845255f4eec0febf24182cc918f5bd1f04a8351dfc509e16b WatchSource:0}: Error finding container 4435cca2338351a845255f4eec0febf24182cc918f5bd1f04a8351dfc509e16b: Status 404 returned error can't find the container with id 4435cca2338351a845255f4eec0febf24182cc918f5bd1f04a8351dfc509e16b Nov 28 16:15:43 crc kubenswrapper[4909]: I1128 16:15:43.908034 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da514bae-771e-4a15-87e6-2f1f7b81f125" path="/var/lib/kubelet/pods/da514bae-771e-4a15-87e6-2f1f7b81f125/volumes" Nov 28 16:15:44 crc kubenswrapper[4909]: I1128 16:15:44.184471 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" event={"ID":"aac0295b-0605-4ffc-9148-135858a66a25","Type":"ContainerStarted","Data":"23d6e2c6f709b1662c2869c69109aeb1faaf918259007611d21b270aa3473bc4"} Nov 28 16:15:44 crc kubenswrapper[4909]: I1128 16:15:44.186211 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:15:44 crc kubenswrapper[4909]: I1128 16:15:44.186327 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" event={"ID":"aac0295b-0605-4ffc-9148-135858a66a25","Type":"ContainerStarted","Data":"4435cca2338351a845255f4eec0febf24182cc918f5bd1f04a8351dfc509e16b"} Nov 28 16:15:44 crc kubenswrapper[4909]: I1128 16:15:44.246396 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" podStartSLOduration=2.24637352 podStartE2EDuration="2.24637352s" podCreationTimestamp="2025-11-28 16:15:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:44.242158353 +0000 UTC m=+326.638842887" watchObservedRunningTime="2025-11-28 16:15:44.24637352 +0000 UTC m=+326.643058044" Nov 28 16:15:44 crc kubenswrapper[4909]: I1128 16:15:44.421214 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6985b54c84-s5tpn" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.698536 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gxvw8"] Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.700704 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.715253 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gxvw8"] Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.851907 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-bound-sa-token\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852066 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9f37f198-5fa7-424d-bd5d-336a1c97e926-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852105 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-certificates\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852152 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-tls\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852199 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852242 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9f37f198-5fa7-424d-bd5d-336a1c97e926-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852272 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-trusted-ca\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.852303 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk74p\" (UniqueName: 
\"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-kube-api-access-qk74p\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.874367 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953228 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9f37f198-5fa7-424d-bd5d-336a1c97e926-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-certificates\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953298 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-tls\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9f37f198-5fa7-424d-bd5d-336a1c97e926-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-trusted-ca\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953355 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk74p\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-kube-api-access-qk74p\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.953383 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-bound-sa-token\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.954721 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-certificates\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.954801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9f37f198-5fa7-424d-bd5d-336a1c97e926-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.955184 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f37f198-5fa7-424d-bd5d-336a1c97e926-trusted-ca\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.961788 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-registry-tls\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.962996 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9f37f198-5fa7-424d-bd5d-336a1c97e926-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.980739 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk74p\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-kube-api-access-qk74p\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:07 crc kubenswrapper[4909]: I1128 16:16:07.980794 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f37f198-5fa7-424d-bd5d-336a1c97e926-bound-sa-token\") pod \"image-registry-66df7c8f76-gxvw8\" (UID: \"9f37f198-5fa7-424d-bd5d-336a1c97e926\") " pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:08 crc kubenswrapper[4909]: I1128 16:16:08.029849 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:08 crc kubenswrapper[4909]: I1128 16:16:08.534400 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gxvw8"] Nov 28 16:16:09 crc kubenswrapper[4909]: I1128 16:16:09.341853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" event={"ID":"9f37f198-5fa7-424d-bd5d-336a1c97e926","Type":"ContainerStarted","Data":"e4d56b5c828507f3736aa112247739ad55c3961f2b2ccd8dcb5fb72162e3b7a5"} Nov 28 16:16:09 crc kubenswrapper[4909]: I1128 16:16:09.343355 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" event={"ID":"9f37f198-5fa7-424d-bd5d-336a1c97e926","Type":"ContainerStarted","Data":"930dd0969ae6c6d84250b21291b194fffe90c95659abc4a5d225f10b08bbbf2c"} Nov 28 16:16:09 crc kubenswrapper[4909]: I1128 16:16:09.343474 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" Nov 28 16:16:09 crc kubenswrapper[4909]: I1128 16:16:09.381023 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8" podStartSLOduration=2.380988416 podStartE2EDuration="2.380988416s" podCreationTimestamp="2025-11-28 16:16:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:09.369264279 +0000 UTC m=+351.765948903" watchObservedRunningTime="2025-11-28 16:16:09.380988416 +0000 UTC m=+351.777672990" Nov 28 16:16:19 crc kubenswrapper[4909]: I1128 16:16:19.910569 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:16:19 crc kubenswrapper[4909]: I1128 16:16:19.911782 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:16:21 crc kubenswrapper[4909]: I1128 16:16:21.969866 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-42h7p"] Nov 28 16:16:21 crc kubenswrapper[4909]: I1128 16:16:21.970959 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-42h7p" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="registry-server" containerID="cri-o://a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897" gracePeriod=30 Nov 28 16:16:21 crc kubenswrapper[4909]: I1128 16:16:21.985849 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rkbfc"] Nov 28 16:16:21 crc kubenswrapper[4909]: I1128 16:16:21.986142 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rkbfc" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="registry-server" containerID="cri-o://77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a" gracePeriod=30 
Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.001394 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.001769 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator" containerID="cri-o://a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2" gracePeriod=30 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.026962 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.027424 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9p7bm" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="registry-server" containerID="cri-o://cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c" gracePeriod=30 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.036276 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nggsh"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.037085 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.041610 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.042201 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sz62r" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="registry-server" containerID="cri-o://1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7" gracePeriod=30 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.045568 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nggsh"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.112474 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.113933 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" podUID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" containerName="controller-manager" containerID="cri-o://e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28" gracePeriod=30 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.161279 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.161347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztggv\" (UniqueName: 
\"kubernetes.io/projected/367a78b1-06bc-48a7-ad2b-4e825e5f683f-kube-api-access-ztggv\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.161390 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.262270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.262736 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztggv\" (UniqueName: \"kubernetes.io/projected/367a78b1-06bc-48a7-ad2b-4e825e5f683f-kube-api-access-ztggv\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.262768 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.263984 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.271067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/367a78b1-06bc-48a7-ad2b-4e825e5f683f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.283106 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztggv\" (UniqueName: \"kubernetes.io/projected/367a78b1-06bc-48a7-ad2b-4e825e5f683f-kube-api-access-ztggv\") pod \"marketplace-operator-79b997595-nggsh\" (UID: \"367a78b1-06bc-48a7-ad2b-4e825e5f683f\") " pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.364331 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.507900 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.552426 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.560869 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.564484 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.567243 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities\") pod \"52118b39-f1b7-486d-a819-ae1f464d793d\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.567320 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msv77\" (UniqueName: \"kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77\") pod \"52118b39-f1b7-486d-a819-ae1f464d793d\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.567364 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content\") pod \"52118b39-f1b7-486d-a819-ae1f464d793d\" (UID: \"52118b39-f1b7-486d-a819-ae1f464d793d\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.569010 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities" (OuterVolumeSpecName: "utilities") pod "52118b39-f1b7-486d-a819-ae1f464d793d" (UID: "52118b39-f1b7-486d-a819-ae1f464d793d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.594281 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77" (OuterVolumeSpecName: "kube-api-access-msv77") pod "52118b39-f1b7-486d-a819-ae1f464d793d" (UID: "52118b39-f1b7-486d-a819-ae1f464d793d"). InnerVolumeSpecName "kube-api-access-msv77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.599994 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.637468 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52118b39-f1b7-486d-a819-ae1f464d793d" (UID: "52118b39-f1b7-486d-a819-ae1f464d793d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672095 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities\") pod \"e831d096-86aa-4351-8d67-bdf81194727c\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672136 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfztf\" (UniqueName: \"kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf\") pod \"410969ad-6fe5-4169-a78a-5e459f402cd3\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672214 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content\") pod \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672233 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities\") pod \"4e3c8494-44f8-475d-8ae3-2613649d6c73\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672260 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities\") pod \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672291 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics\") pod \"410969ad-6fe5-4169-a78a-5e459f402cd3\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672309 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content\") pod \"e831d096-86aa-4351-8d67-bdf81194727c\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672331 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snwjp\" (UniqueName: \"kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp\") pod \"e831d096-86aa-4351-8d67-bdf81194727c\" (UID: \"e831d096-86aa-4351-8d67-bdf81194727c\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672349 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6b58m\" (UniqueName: \"kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m\") pod \"4e3c8494-44f8-475d-8ae3-2613649d6c73\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672364 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content\") pod 
\"4e3c8494-44f8-475d-8ae3-2613649d6c73\" (UID: \"4e3c8494-44f8-475d-8ae3-2613649d6c73\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672394 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca\") pod \"410969ad-6fe5-4169-a78a-5e459f402cd3\" (UID: \"410969ad-6fe5-4169-a78a-5e459f402cd3\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672413 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s2pn\" (UniqueName: \"kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn\") pod \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\" (UID: \"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672591 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672602 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msv77\" (UniqueName: \"kubernetes.io/projected/52118b39-f1b7-486d-a819-ae1f464d793d-kube-api-access-msv77\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672611 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52118b39-f1b7-486d-a819-ae1f464d793d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.672787 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities" (OuterVolumeSpecName: "utilities") pod "e831d096-86aa-4351-8d67-bdf81194727c" (UID: "e831d096-86aa-4351-8d67-bdf81194727c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.679378 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities" (OuterVolumeSpecName: "utilities") pod "4e3c8494-44f8-475d-8ae3-2613649d6c73" (UID: "4e3c8494-44f8-475d-8ae3-2613649d6c73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.690735 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "410969ad-6fe5-4169-a78a-5e459f402cd3" (UID: "410969ad-6fe5-4169-a78a-5e459f402cd3"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.695464 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities" (OuterVolumeSpecName: "utilities") pod "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" (UID: "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.697906 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn" (OuterVolumeSpecName: "kube-api-access-5s2pn") pod "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" (UID: "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98"). InnerVolumeSpecName "kube-api-access-5s2pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.700159 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf" (OuterVolumeSpecName: "kube-api-access-cfztf") pod "410969ad-6fe5-4169-a78a-5e459f402cd3" (UID: "410969ad-6fe5-4169-a78a-5e459f402cd3"). InnerVolumeSpecName "kube-api-access-cfztf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.700274 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m" (OuterVolumeSpecName: "kube-api-access-6b58m") pod "4e3c8494-44f8-475d-8ae3-2613649d6c73" (UID: "4e3c8494-44f8-475d-8ae3-2613649d6c73"). InnerVolumeSpecName "kube-api-access-6b58m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.700808 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp" (OuterVolumeSpecName: "kube-api-access-snwjp") pod "e831d096-86aa-4351-8d67-bdf81194727c" (UID: "e831d096-86aa-4351-8d67-bdf81194727c"). InnerVolumeSpecName "kube-api-access-snwjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.708155 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "410969ad-6fe5-4169-a78a-5e459f402cd3" (UID: "410969ad-6fe5-4169-a78a-5e459f402cd3"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.711959 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e3c8494-44f8-475d-8ae3-2613649d6c73" (UID: "4e3c8494-44f8-475d-8ae3-2613649d6c73"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.735472 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nggsh"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.768363 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e831d096-86aa-4351-8d67-bdf81194727c" (UID: "e831d096-86aa-4351-8d67-bdf81194727c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774412 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774441 4909 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774456 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774465 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snwjp\" (UniqueName: \"kubernetes.io/projected/e831d096-86aa-4351-8d67-bdf81194727c-kube-api-access-snwjp\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774474 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6b58m\" (UniqueName: \"kubernetes.io/projected/4e3c8494-44f8-475d-8ae3-2613649d6c73-kube-api-access-6b58m\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774482 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774491 4909 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/410969ad-6fe5-4169-a78a-5e459f402cd3-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774500 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s2pn\" (UniqueName: \"kubernetes.io/projected/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-kube-api-access-5s2pn\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774515 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e831d096-86aa-4351-8d67-bdf81194727c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774535 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfztf\" (UniqueName: \"kubernetes.io/projected/410969ad-6fe5-4169-a78a-5e459f402cd3-kube-api-access-cfztf\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.774542 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e3c8494-44f8-475d-8ae3-2613649d6c73-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.775601 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.866196 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" (UID: "2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.875239 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") pod \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.875312 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles\") pod \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.875362 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") pod \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876305 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkjn5\" (UniqueName: \"kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5\") pod \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876344 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config\") pod \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\" (UID: \"51dc9bf8-a5fb-483d-b9d7-2593847029ef\") " Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876425 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca" (OuterVolumeSpecName: "client-ca") pod "51dc9bf8-a5fb-483d-b9d7-2593847029ef" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876423 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "51dc9bf8-a5fb-483d-b9d7-2593847029ef" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876718 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876737 4909 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.876749 4909 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.877223 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config" (OuterVolumeSpecName: "config") pod "51dc9bf8-a5fb-483d-b9d7-2593847029ef" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.879634 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "51dc9bf8-a5fb-483d-b9d7-2593847029ef" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.884865 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5" (OuterVolumeSpecName: "kube-api-access-mkjn5") pod "51dc9bf8-a5fb-483d-b9d7-2593847029ef" (UID: "51dc9bf8-a5fb-483d-b9d7-2593847029ef"). InnerVolumeSpecName "kube-api-access-mkjn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.942481 4909 generic.go:334] "Generic (PLEG): container finished" podID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" containerID="e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.942527 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.942563 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" event={"ID":"51dc9bf8-a5fb-483d-b9d7-2593847029ef","Type":"ContainerDied","Data":"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.942596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ff9c64758-x28sn" event={"ID":"51dc9bf8-a5fb-483d-b9d7-2593847029ef","Type":"ContainerDied","Data":"8076f37fed4234f650c43a90064a0e228b79209ba7181389ec754fe41e18dec3"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.942617 4909 scope.go:117] "RemoveContainer" containerID="e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.948466 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerID="1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.948523 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sz62r" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.948568 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerDied","Data":"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.948607 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sz62r" event={"ID":"2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98","Type":"ContainerDied","Data":"90c03f5224ea4d34e2edcfe82e970fba8733c1a77c17c68987df0cd2d67bfa64"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.960023 4909 generic.go:334] "Generic (PLEG): container finished" podID="52118b39-f1b7-486d-a819-ae1f464d793d" containerID="a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.960298 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-42h7p" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.960491 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerDied","Data":"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.961850 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42h7p" event={"ID":"52118b39-f1b7-486d-a819-ae1f464d793d","Type":"ContainerDied","Data":"5fe0e4acd00ddf6b5fcc7598a7eee3de0818bb3840bbf9ecf6a46492213f9ae6"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.964352 4909 generic.go:334] "Generic (PLEG): container finished" podID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerID="a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.964424 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerDied","Data":"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.964454 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" event={"ID":"410969ad-6fe5-4169-a78a-5e459f402cd3","Type":"ContainerDied","Data":"ae8083dc8b11ea8b7f8929a28abf86dd38a2824f428eb5573589142355c138bc"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.964532 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pnjkw" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.979004 4909 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51dc9bf8-a5fb-483d-b9d7-2593847029ef-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.979062 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkjn5\" (UniqueName: \"kubernetes.io/projected/51dc9bf8-a5fb-483d-b9d7-2593847029ef-kube-api-access-mkjn5\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.979074 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51dc9bf8-a5fb-483d-b9d7-2593847029ef-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.979438 4909 scope.go:117] "RemoveContainer" containerID="e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28" Nov 28 16:16:22 crc kubenswrapper[4909]: E1128 16:16:22.980967 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28\": container with ID starting with e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28 not found: ID does not exist" containerID="e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.981437 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" 
event={"ID":"367a78b1-06bc-48a7-ad2b-4e825e5f683f","Type":"ContainerStarted","Data":"a6f990f8c9acb280f5edfed116cc1311de4b3d88f375525e045f51523703db63"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.982370 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" event={"ID":"367a78b1-06bc-48a7-ad2b-4e825e5f683f","Type":"ContainerStarted","Data":"8d510908d37040ff6bc911845e11be21df2dbcb9c46454796f392cb06d7edf61"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.982397 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.982512 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28"} err="failed to get container status \"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28\": rpc error: code = NotFound desc = could not find container \"e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28\": container with ID starting with e6370a781933c63e4aee54770b865d1ebdb3daf9f6801591e93e132cd2e9ce28 not found: ID does not exist" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.982545 4909 scope.go:117] "RemoveContainer" containerID="1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.987767 4909 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-nggsh container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" start-of-body= Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.987825 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" podUID="367a78b1-06bc-48a7-ad2b-4e825e5f683f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.992568 4909 generic.go:334] "Generic (PLEG): container finished" podID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerID="cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.992660 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerDied","Data":"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.992713 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7bm" event={"ID":"4e3c8494-44f8-475d-8ae3-2613649d6c73","Type":"ContainerDied","Data":"0e9d1df6205bf1a741be6102ddde6e90e5331bad08d370b21c7ccb714f8f83d6"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.992800 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7bm" Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.995441 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"] Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.999087 4909 generic.go:334] "Generic (PLEG): container finished" podID="e831d096-86aa-4351-8d67-bdf81194727c" containerID="77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.999261 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerDied","Data":"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.999356 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rkbfc" event={"ID":"e831d096-86aa-4351-8d67-bdf81194727c","Type":"ContainerDied","Data":"23670d109b89358f2efd2e124f2f82e1e9fc43db526410d2dfb3e58165a33e5a"} Nov 28 16:16:22 crc kubenswrapper[4909]: I1128 16:16:22.999504 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rkbfc" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:22.999748 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7ff9c64758-x28sn"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.016107 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.022355 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sz62r"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.024559 4909 scope.go:117] "RemoveContainer" containerID="6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.028576 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.032187 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pnjkw"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.043917 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-42h7p"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.047499 4909 scope.go:117] "RemoveContainer" containerID="e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.051145 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-42h7p"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.063436 4909 scope.go:117] "RemoveContainer" containerID="1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.064616 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh" podStartSLOduration=1.064607626 podStartE2EDuration="1.064607626s" podCreationTimestamp="2025-11-28 16:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:23.063748664 +0000 UTC m=+365.460433208" watchObservedRunningTime="2025-11-28 16:16:23.064607626 +0000 UTC m=+365.461292150" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.065314 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7\": container with ID starting with 1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7 not found: ID does not exist" containerID="1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.065353 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7"} err="failed to get container status \"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7\": rpc error: code = NotFound desc = could not find container \"1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7\": container with ID starting with 1a1ef1c84a77b88343460d5d03bebcb83c6e97155e80e21f045566260c7666f7 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.065381 4909 scope.go:117] "RemoveContainer" containerID="6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.065972 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda\": container with ID starting with 6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda not found: ID does not exist" containerID="6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.066004 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda"} err="failed to get container status \"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda\": rpc error: code = NotFound desc = could not find container \"6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda\": container with ID starting with 6e383ba7faa431efb72a65caf0cd05b7874550f25432847545e7be55a4b89bda not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.066023 4909 scope.go:117] "RemoveContainer" containerID="e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.068595 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb\": container with ID starting with e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb not found: ID does not exist" containerID="e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.068625 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb"} err="failed to get container status \"e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb\": rpc error: code = NotFound desc = could not find container 
\"e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb\": container with ID starting with e5e1491bc4bb047c814c9415d1546349f91e517df99fdef355efc0e09724c5cb not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.068641 4909 scope.go:117] "RemoveContainer" containerID="a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.084872 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rkbfc"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.087558 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rkbfc"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.089338 4909 scope.go:117] "RemoveContainer" containerID="85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.098149 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.102279 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7bm"] Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.108329 4909 scope.go:117] "RemoveContainer" containerID="d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.125035 4909 scope.go:117] "RemoveContainer" containerID="a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.125536 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897\": container with ID starting with a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897 not found: ID does not exist" containerID="a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.125574 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897"} err="failed to get container status \"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897\": rpc error: code = NotFound desc = could not find container \"a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897\": container with ID starting with a1468afc4f9e49253e0248b48724aa02ef0c25e2df428ced93b5df419a619897 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.125596 4909 scope.go:117] "RemoveContainer" containerID="85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.125917 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272\": container with ID starting with 85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272 not found: ID does not exist" containerID="85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.125938 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272"} err="failed to get 
container status \"85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272\": rpc error: code = NotFound desc = could not find container \"85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272\": container with ID starting with 85eb98ed2bdb798022896afd7e0a4c88282d245288a897f0823eb32722a91272 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.125951 4909 scope.go:117] "RemoveContainer" containerID="d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.126258 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17\": container with ID starting with d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17 not found: ID does not exist" containerID="d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.126303 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17"} err="failed to get container status \"d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17\": rpc error: code = NotFound desc = could not find container \"d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17\": container with ID starting with d4a91133eac17c2b3e7f9c4d51ef3868264e0e0645b5824d7fc16244c77e4d17 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.126331 4909 scope.go:117] "RemoveContainer" containerID="a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.139725 4909 scope.go:117] "RemoveContainer" containerID="03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.196233 4909 scope.go:117] "RemoveContainer" containerID="a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.196987 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2\": container with ID starting with a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2 not found: ID does not exist" containerID="a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.197030 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2"} err="failed to get container status \"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2\": rpc error: code = NotFound desc = could not find container \"a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2\": container with ID starting with a4d92d41dad11e55de2a595d1b0f1287e2777f238c4a9e3c7707b25ab44afca2 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.197068 4909 scope.go:117] "RemoveContainer" containerID="03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.197796 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b\": container with ID starting with 03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b not found: ID does not exist" containerID="03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.197867 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b"} err="failed to get container status \"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b\": rpc error: code = NotFound desc = could not find container \"03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b\": container with ID starting with 03a017ebdf97fbdc2be2c38251a1008c90ba09a22584b15a7e4f8a764e954c4b not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.197897 4909 scope.go:117] "RemoveContainer" containerID="cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.213359 4909 scope.go:117] "RemoveContainer" containerID="e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.230870 4909 scope.go:117] "RemoveContainer" containerID="221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.253528 4909 scope.go:117] "RemoveContainer" containerID="cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.254114 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c\": container with ID starting with cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c not found: ID does not exist" containerID="cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.254161 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c"} err="failed to get container status \"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c\": rpc error: code = NotFound desc = could not find container \"cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c\": container with ID starting with cffa0c54f48406f8f946d3133ef1ff15af43ceda767c5fd6adbeedc719146e2c not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.254189 4909 scope.go:117] "RemoveContainer" containerID="e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.255966 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e\": container with ID starting with e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e not found: ID does not exist" containerID="e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.255989 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e"} err="failed to get container status 
\"e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e\": rpc error: code = NotFound desc = could not find container \"e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e\": container with ID starting with e166515e87435186b7799c72a8e6540638343ac39f832c90e256df338e36c67e not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.256002 4909 scope.go:117] "RemoveContainer" containerID="221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.256410 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0\": container with ID starting with 221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0 not found: ID does not exist" containerID="221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.256448 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0"} err="failed to get container status \"221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0\": rpc error: code = NotFound desc = could not find container \"221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0\": container with ID starting with 221d793acad81d9374268688764596065b92f87c2903edcf05e5a1dc9852f4f0 not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.256461 4909 scope.go:117] "RemoveContainer" containerID="77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.271292 4909 scope.go:117] "RemoveContainer" containerID="558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.286971 4909 scope.go:117] "RemoveContainer" containerID="8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.303629 4909 scope.go:117] "RemoveContainer" containerID="77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.304159 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a\": container with ID starting with 77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a not found: ID does not exist" containerID="77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.304192 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a"} err="failed to get container status \"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a\": rpc error: code = NotFound desc = could not find container \"77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a\": container with ID starting with 77d07557bf73b5f34dcab3f8ab701ab2566220050e5268a52dffb857cb9b172a not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.304214 4909 scope.go:117] "RemoveContainer" containerID="558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e" Nov 28 16:16:23 crc 
kubenswrapper[4909]: E1128 16:16:23.304499 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e\": container with ID starting with 558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e not found: ID does not exist" containerID="558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.304519 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e"} err="failed to get container status \"558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e\": rpc error: code = NotFound desc = could not find container \"558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e\": container with ID starting with 558d6d23696475180141a8cc5656299ee5da8be154125f2df474c5b3420d162e not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.304534 4909 scope.go:117] "RemoveContainer" containerID="8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.304829 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc\": container with ID starting with 8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc not found: ID does not exist" containerID="8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.304871 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc"} err="failed to get container status \"8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc\": rpc error: code = NotFound desc = could not find container \"8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc\": container with ID starting with 8070c9c93c3734ff833204cbea830f4977d7b5624e15b1635c2d85b2316fa3fc not found: ID does not exist" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.907032 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" path="/var/lib/kubelet/pods/2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98/volumes" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.907875 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" path="/var/lib/kubelet/pods/410969ad-6fe5-4169-a78a-5e459f402cd3/volumes" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.908435 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" path="/var/lib/kubelet/pods/4e3c8494-44f8-475d-8ae3-2613649d6c73/volumes" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.909680 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" path="/var/lib/kubelet/pods/51dc9bf8-a5fb-483d-b9d7-2593847029ef/volumes" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.910222 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" path="/var/lib/kubelet/pods/52118b39-f1b7-486d-a819-ae1f464d793d/volumes" Nov 28 16:16:23 crc 
kubenswrapper[4909]: I1128 16:16:23.911363 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e831d096-86aa-4351-8d67-bdf81194727c" path="/var/lib/kubelet/pods/e831d096-86aa-4351-8d67-bdf81194727c/volumes" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.944989 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"] Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945204 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945218 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945231 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945239 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945249 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945257 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945268 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945276 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945289 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945299 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945309 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945316 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945326 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945333 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945343 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945350 4909 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945362 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945369 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="extract-content" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945378 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945385 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945397 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945404 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945412 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" containerName="controller-manager" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945419 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" containerName="controller-manager" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945427 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945435 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="extract-utilities" Nov 28 16:16:23 crc kubenswrapper[4909]: E1128 16:16:23.945444 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945450 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945541 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e369b4b-cc5e-474d-8a7e-f3f5fcbb9f98" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945553 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="51dc9bf8-a5fb-483d-b9d7-2593847029ef" containerName="controller-manager" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945564 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="52118b39-f1b7-486d-a819-ae1f464d793d" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945573 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e3c8494-44f8-475d-8ae3-2613649d6c73" containerName="registry-server" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.945588 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator" Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 
16:16:23.945598 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e831d096-86aa-4351-8d67-bdf81194727c" containerName="registry-server"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.946065 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.949606 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.949891 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.950455 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.950588 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.950744 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.955149 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.963911 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.976732 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"]
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.991926 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-proxy-ca-bundles\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.991999 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-client-ca\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.992082 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-config\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.992121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjpw4\" (UniqueName: \"kubernetes.io/projected/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-kube-api-access-mjpw4\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:23 crc kubenswrapper[4909]: I1128 16:16:23.992182 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-serving-cert\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.016291 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-nggsh"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.093051 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-config\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.093105 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjpw4\" (UniqueName: \"kubernetes.io/projected/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-kube-api-access-mjpw4\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.093155 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-serving-cert\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.093206 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-proxy-ca-bundles\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.093226 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-client-ca\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.094144 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-client-ca\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.094547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-config\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.095143 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-proxy-ca-bundles\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.097736 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-serving-cert\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.110567 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjpw4\" (UniqueName: \"kubernetes.io/projected/bcf78c49-a9ce-40f8-9a5e-899a2efe286d-kube-api-access-mjpw4\") pod \"controller-manager-b9f6c7676-zwp8h\" (UID: \"bcf78c49-a9ce-40f8-9a5e-899a2efe286d\") " pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.136342 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 16:16:24 crc kubenswrapper[4909]: E1128 16:16:24.136603 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.136627 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.136883 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="410969ad-6fe5-4169-a78a-5e459f402cd3" containerName="marketplace-operator"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.137634 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.139581 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.147174 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.194445 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.194500 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmwn8\" (UniqueName: \"kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.194548 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.274810 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.295226 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmwn8\" (UniqueName: \"kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.295270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.295343 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.296221 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.296220 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.317432 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmwn8\" (UniqueName: \"kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8\") pod \"community-operators-6bmn8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") " pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.475076 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.649148 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"]
Nov 28 16:16:24 crc kubenswrapper[4909]: I1128 16:16:24.654132 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 16:16:24 crc kubenswrapper[4909]: W1128 16:16:24.659678 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d8f5823_a0ca_4fcc_9b8e_265a045e6ab8.slice/crio-54a04210e0277e79f28fa35f99b914cecb0b4537ea64a8e111a66c5311cffe7c WatchSource:0}: Error finding container 54a04210e0277e79f28fa35f99b914cecb0b4537ea64a8e111a66c5311cffe7c: Status 404 returned error can't find the container with id 54a04210e0277e79f28fa35f99b914cecb0b4537ea64a8e111a66c5311cffe7c
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.020679 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h" event={"ID":"bcf78c49-a9ce-40f8-9a5e-899a2efe286d","Type":"ContainerStarted","Data":"df67079365a5b673f57a3e734cb10ce97971393e6324722258ba409c028aab52"}
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.021040 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h" event={"ID":"bcf78c49-a9ce-40f8-9a5e-899a2efe286d","Type":"ContainerStarted","Data":"886ee3ba99f8cc193eff1e8079668bb9a613fa7b024849ff32bb3a3b42bea7bd"}
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.021065 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.022836 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerID="f19dfc8391e9d9a9b72eaa15b3ca393a5025e8e52f4c87b8c1bdc5df3e9a13fa" exitCode=0
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.023489 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerDied","Data":"f19dfc8391e9d9a9b72eaa15b3ca393a5025e8e52f4c87b8c1bdc5df3e9a13fa"}
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.023517 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerStarted","Data":"54a04210e0277e79f28fa35f99b914cecb0b4537ea64a8e111a66c5311cffe7c"}
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.025186 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.052779 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b9f6c7676-zwp8h" podStartSLOduration=3.052762023 podStartE2EDuration="3.052762023s" podCreationTimestamp="2025-11-28 16:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:25.038359717 +0000 UTC m=+367.435044241" watchObservedRunningTime="2025-11-28 16:16:25.052762023 +0000 UTC m=+367.449446547"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.342145 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"]
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.343577 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.348161 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.366602 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"]
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.409073 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.409128 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p6xw\" (UniqueName: \"kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.409157 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.510586 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.510636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p6xw\" (UniqueName: \"kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.510684 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.511136 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.511149 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.538939 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p6xw\" (UniqueName: \"kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw\") pod \"redhat-marketplace-skkwd\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:25 crc kubenswrapper[4909]: I1128 16:16:25.659412 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.095871 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"]
Nov 28 16:16:26 crc kubenswrapper[4909]: W1128 16:16:26.111378 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce487258_c9fc_4b91_9878_87ca92c76d15.slice/crio-6422f3449fbf217b4a14d6322bf25a001b82306571b6337834ad59140f4bd350 WatchSource:0}: Error finding container 6422f3449fbf217b4a14d6322bf25a001b82306571b6337834ad59140f4bd350: Status 404 returned error can't find the container with id 6422f3449fbf217b4a14d6322bf25a001b82306571b6337834ad59140f4bd350
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.739492 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-grq6g"]
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.740881 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.742742 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.747549 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-grq6g"]
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.828629 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnwdx\" (UniqueName: \"kubernetes.io/projected/ce9fe3b1-d4b1-4de1-843a-976216847bda-kube-api-access-hnwdx\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.828814 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-catalog-content\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.828868 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-utilities\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.929853 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-catalog-content\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.929890 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-utilities\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.929938 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnwdx\" (UniqueName: \"kubernetes.io/projected/ce9fe3b1-d4b1-4de1-843a-976216847bda-kube-api-access-hnwdx\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.930424 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-catalog-content\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.930831 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce9fe3b1-d4b1-4de1-843a-976216847bda-utilities\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:26 crc kubenswrapper[4909]: I1128 16:16:26.950325 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnwdx\" (UniqueName: \"kubernetes.io/projected/ce9fe3b1-d4b1-4de1-843a-976216847bda-kube-api-access-hnwdx\") pod \"redhat-operators-grq6g\" (UID: \"ce9fe3b1-d4b1-4de1-843a-976216847bda\") " pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.034518 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerID="01904f59f3ed146d43d0cd2a0cd3a48487b01553abf0d95a40d4a371708a39d4" exitCode=0
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.034622 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerDied","Data":"01904f59f3ed146d43d0cd2a0cd3a48487b01553abf0d95a40d4a371708a39d4"}
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.034708 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerStarted","Data":"6422f3449fbf217b4a14d6322bf25a001b82306571b6337834ad59140f4bd350"}
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.039823 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerID="f2199381a8746b4f3caa80285db0fac38d8e0780ad53b9af86d7deb265352775" exitCode=0
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.039873 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerDied","Data":"f2199381a8746b4f3caa80285db0fac38d8e0780ad53b9af86d7deb265352775"}
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.055696 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.475937 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-grq6g"]
Nov 28 16:16:27 crc kubenswrapper[4909]: W1128 16:16:27.481552 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce9fe3b1_d4b1_4de1_843a_976216847bda.slice/crio-3b1c474b1be537310cf8c7f9149d2156fc7f264dfdfe120bb5887785fa61a581 WatchSource:0}: Error finding container 3b1c474b1be537310cf8c7f9149d2156fc7f264dfdfe120bb5887785fa61a581: Status 404 returned error can't find the container with id 3b1c474b1be537310cf8c7f9149d2156fc7f264dfdfe120bb5887785fa61a581
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.734949 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"]
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.739502 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.741143 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.748149 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"]
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.847423 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.847744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz8rc\" (UniqueName: \"kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.847770 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.949084 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.949141 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz8rc\" (UniqueName: \"kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.949201 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.949750 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.950040 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:27 crc kubenswrapper[4909]: I1128 16:16:27.965237 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz8rc\" (UniqueName: \"kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc\") pod \"certified-operators-jjgbb\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.035233 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-gxvw8"
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.049906 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce9fe3b1-d4b1-4de1-843a-976216847bda" containerID="59f085b978af54da786304e92ccdc7ba03ddd71fbc58c19275594669bccbd204" exitCode=0
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.049954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-grq6g" event={"ID":"ce9fe3b1-d4b1-4de1-843a-976216847bda","Type":"ContainerDied","Data":"59f085b978af54da786304e92ccdc7ba03ddd71fbc58c19275594669bccbd204"}
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.050007 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-grq6g" event={"ID":"ce9fe3b1-d4b1-4de1-843a-976216847bda","Type":"ContainerStarted","Data":"3b1c474b1be537310cf8c7f9149d2156fc7f264dfdfe120bb5887785fa61a581"}
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.083851 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.084167 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"]
Nov 28 16:16:28 crc kubenswrapper[4909]: I1128 16:16:28.556134 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"]
Nov 28 16:16:29 crc kubenswrapper[4909]: I1128 16:16:29.055721 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerStarted","Data":"cb25205bb7dd8e8fc4cc402156bd74e519f06aa735d0de95a8bcafc7bda7ae59"}
Nov 28 16:16:29 crc kubenswrapper[4909]: I1128 16:16:29.059376 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerStarted","Data":"2912497954d6918ddf640af2cb9ce41701e3e051be1f6c6bdf11e21d9381b9f7"}
Nov 28 16:16:29 crc kubenswrapper[4909]: I1128 16:16:29.061537 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerStarted","Data":"2c87ff67c0dec40fb6678a41103c70dcf3570cbbf2ed150d51b34ad7fc1ad754"}
Nov 28 16:16:29 crc kubenswrapper[4909]: I1128 16:16:29.078409 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6bmn8" podStartSLOduration=2.456613474 podStartE2EDuration="5.078391567s" podCreationTimestamp="2025-11-28 16:16:24 +0000 UTC" firstStartedPulling="2025-11-28 16:16:25.025316756 +0000 UTC m=+367.422001280" lastFinishedPulling="2025-11-28 16:16:27.647094839 +0000 UTC m=+370.043779373" observedRunningTime="2025-11-28 16:16:29.077054253 +0000 UTC m=+371.473738777" watchObservedRunningTime="2025-11-28 16:16:29.078391567 +0000 UTC m=+371.475076091"
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.068070 4909 generic.go:334] "Generic (PLEG): container finished" podID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerID="6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5" exitCode=0
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.068136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerDied","Data":"6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5"}
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.070302 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce9fe3b1-d4b1-4de1-843a-976216847bda" containerID="8d8cdaaa8266e892d19b3b0afe8ab2173e72c818e54f7123436f0959e2e4fcdb" exitCode=0
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.070362 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-grq6g" event={"ID":"ce9fe3b1-d4b1-4de1-843a-976216847bda","Type":"ContainerDied","Data":"8d8cdaaa8266e892d19b3b0afe8ab2173e72c818e54f7123436f0959e2e4fcdb"}
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.072089 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerID="2c87ff67c0dec40fb6678a41103c70dcf3570cbbf2ed150d51b34ad7fc1ad754" exitCode=0
Nov 28 16:16:30 crc kubenswrapper[4909]: I1128 16:16:30.072127 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerDied","Data":"2c87ff67c0dec40fb6678a41103c70dcf3570cbbf2ed150d51b34ad7fc1ad754"}
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.091612 4909 generic.go:334] "Generic (PLEG): container finished" podID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerID="3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44" exitCode=0
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.092271 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerDied","Data":"3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44"}
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.100442 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-grq6g" event={"ID":"ce9fe3b1-d4b1-4de1-843a-976216847bda","Type":"ContainerStarted","Data":"99ad8c978700fdd878a98798b4c1b6872422610c85a8dada330ac63083d3881d"}
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.103388 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerStarted","Data":"a615688e3a4a2c8aadbefada068a9a1a8ebdd716706df0696d134b67b3798331"}
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.177584 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-grq6g" podStartSLOduration=3.297347582 podStartE2EDuration="6.177563363s" podCreationTimestamp="2025-11-28 16:16:26 +0000 UTC" firstStartedPulling="2025-11-28 16:16:28.051173692 +0000 UTC m=+370.447858216" lastFinishedPulling="2025-11-28 16:16:30.931389463 +0000 UTC m=+373.328073997" observedRunningTime="2025-11-28 16:16:32.159413213 +0000 UTC m=+374.556097747" watchObservedRunningTime="2025-11-28 16:16:32.177563363 +0000 UTC m=+374.574247897"
Nov 28 16:16:32 crc kubenswrapper[4909]: I1128 16:16:32.179766 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-skkwd" podStartSLOduration=3.084425086 podStartE2EDuration="7.179757769s" podCreationTimestamp="2025-11-28 16:16:25 +0000 UTC" firstStartedPulling="2025-11-28 16:16:27.039371139 +0000 UTC m=+369.436055663" lastFinishedPulling="2025-11-28 16:16:31.134703822 +0000 UTC m=+373.531388346" observedRunningTime="2025-11-28 16:16:32.176801074 +0000 UTC m=+374.573485608" watchObservedRunningTime="2025-11-28 16:16:32.179757769 +0000 UTC m=+374.576442293"
Nov 28 16:16:33 crc kubenswrapper[4909]: I1128 16:16:33.112641 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerStarted","Data":"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547"}
Nov 28 16:16:33 crc kubenswrapper[4909]: I1128 16:16:33.134511 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jjgbb" podStartSLOduration=3.7143594760000003 podStartE2EDuration="6.134490934s" podCreationTimestamp="2025-11-28 16:16:27 +0000 UTC" firstStartedPulling="2025-11-28 16:16:30.069851533 +0000 UTC m=+372.466536057" lastFinishedPulling="2025-11-28 16:16:32.489982991 +0000 UTC m=+374.886667515" observedRunningTime="2025-11-28 16:16:33.130088392 +0000 UTC m=+375.526772916" watchObservedRunningTime="2025-11-28 16:16:33.134490934 +0000 UTC m=+375.531175458"
Nov 28 16:16:34 crc kubenswrapper[4909]: I1128 16:16:34.476204 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:34 crc kubenswrapper[4909]: I1128 16:16:34.476256 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:34 crc kubenswrapper[4909]: I1128 16:16:34.538129 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:35 crc kubenswrapper[4909]: I1128 16:16:35.172809 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 16:16:35 crc kubenswrapper[4909]: I1128 16:16:35.660557 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:35 crc kubenswrapper[4909]: I1128 16:16:35.660627 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:35 crc kubenswrapper[4909]: I1128 16:16:35.704341 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:36 crc kubenswrapper[4909]: I1128 16:16:36.187523 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-skkwd"
Nov 28 16:16:37 crc kubenswrapper[4909]: I1128 16:16:37.056975 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:37 crc kubenswrapper[4909]: I1128 16:16:37.057466 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:37 crc kubenswrapper[4909]: I1128 16:16:37.100638 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:37 crc kubenswrapper[4909]: I1128 16:16:37.187101 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-grq6g"
Nov 28 16:16:38 crc kubenswrapper[4909]: I1128 16:16:38.084375 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:38 crc kubenswrapper[4909]: I1128 16:16:38.084443 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:38 crc kubenswrapper[4909]: I1128 16:16:38.134145 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:38 crc kubenswrapper[4909]: I1128 16:16:38.189384 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jjgbb"
Nov 28 16:16:49 crc kubenswrapper[4909]: I1128 16:16:49.910883 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:16:49 crc kubenswrapper[4909]: I1128 16:16:49.911492 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.129987 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" podUID="cbee12e6-b82e-4451-8292-dca1540e2ab5" containerName="registry" containerID="cri-o://eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48" gracePeriod=30
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.639949 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.732138 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.732341 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733043 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733091 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733208 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733241 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733291 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.733355 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mjmj\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj\") pod \"cbee12e6-b82e-4451-8292-dca1540e2ab5\" (UID: \"cbee12e6-b82e-4451-8292-dca1540e2ab5\") "
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.734527 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.734752 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.742594 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj" (OuterVolumeSpecName: "kube-api-access-9mjmj") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "kube-api-access-9mjmj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.743796 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.743874 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.744324 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.748691 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.749946 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cbee12e6-b82e-4451-8292-dca1540e2ab5" (UID: "cbee12e6-b82e-4451-8292-dca1540e2ab5"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835187 4909 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835248 4909 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbee12e6-b82e-4451-8292-dca1540e2ab5-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835269 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mjmj\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-kube-api-access-9mjmj\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835289 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835308 4909 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbee12e6-b82e-4451-8292-dca1540e2ab5-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835327 4909 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbee12e6-b82e-4451-8292-dca1540e2ab5-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:53 crc kubenswrapper[4909]: I1128 16:16:53.835352 4909 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbee12e6-b82e-4451-8292-dca1540e2ab5-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.233827 4909 generic.go:334] "Generic (PLEG): container finished" podID="cbee12e6-b82e-4451-8292-dca1540e2ab5" containerID="eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48" exitCode=0
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.233888 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" event={"ID":"cbee12e6-b82e-4451-8292-dca1540e2ab5","Type":"ContainerDied","Data":"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"}
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.233922 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29"
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.233965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-p5p29" event={"ID":"cbee12e6-b82e-4451-8292-dca1540e2ab5","Type":"ContainerDied","Data":"013197be78954c0ec7b8f6584bebf24eff562a7b9f777f6a805b5fa8774256ec"}
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.233989 4909 scope.go:117] "RemoveContainer" containerID="eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.258722 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"]
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.263431 4909 scope.go:117] "RemoveContainer" containerID="eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"
Nov 28 16:16:54 crc kubenswrapper[4909]: E1128 16:16:54.263992 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48\": container with ID starting with eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48 not found: ID does not exist" containerID="eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.264026 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48"} err="failed to get container status \"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48\": rpc error: code = NotFound desc = could not find container \"eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48\": container with ID starting with eaffd4db9346545391d8cdfe91b539c95595a1c1e74c030bea6ee1f67e0cbf48 not found: ID does not exist"
Nov 28 16:16:54 crc kubenswrapper[4909]: I1128 16:16:54.265049 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-p5p29"]
Nov 28 16:16:55 crc kubenswrapper[4909]: I1128 16:16:55.912624 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbee12e6-b82e-4451-8292-dca1540e2ab5" path="/var/lib/kubelet/pods/cbee12e6-b82e-4451-8292-dca1540e2ab5/volumes"
Nov 28 16:17:19 crc kubenswrapper[4909]: I1128 16:17:19.911244 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:17:19 crc kubenswrapper[4909]: I1128 16:17:19.912076 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:17:19 crc kubenswrapper[4909]: I1128 16:17:19.912963 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 16:17:19 crc kubenswrapper[4909]: I1128 16:17:19.914643 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:17:19 crc kubenswrapper[4909]: I1128 16:17:19.914801 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4" gracePeriod=600
Nov 28 16:17:20 crc kubenswrapper[4909]: I1128 16:17:20.444917 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4" exitCode=0
Nov 28 16:17:20 crc kubenswrapper[4909]: I1128 16:17:20.445045 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4"}
Nov 28 16:17:20 crc kubenswrapper[4909]: I1128 16:17:20.445231 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636"}
Nov 28 16:17:20 crc kubenswrapper[4909]: I1128 16:17:20.445255 4909 scope.go:117] "RemoveContainer" containerID="1c2757c4dc287e41bc57c065df2906fc5961d005829fa24f22d3b5078d17555a"
Nov 28 16:19:18 crc kubenswrapper[4909]: I1128 16:19:18.141446 4909 scope.go:117] "RemoveContainer" containerID="bc3116d54b6a95bef18b47a8a906a4f3bed525c0d2c300837a2f87ffa77680d5"
Nov 28 16:19:49 crc kubenswrapper[4909]: I1128 16:19:49.910492 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:19:49 crc kubenswrapper[4909]: I1128 16:19:49.911377 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:18 crc kubenswrapper[4909]: I1128 16:20:18.164560 4909 scope.go:117] "RemoveContainer" containerID="405fdc32bcb5f833a33c6681dda4f7e064f104693d375e9ec308992cbaca745c"
Nov 28 16:20:18 crc kubenswrapper[4909]: I1128 16:20:18.203485 4909 scope.go:117] "RemoveContainer" containerID="af2b8315ca367d40073676d9c49360ed1d8b63d081331e1575c71d6bd36b84f3"
Nov 28 16:20:19 crc kubenswrapper[4909]: I1128 16:20:19.911471 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:20:19 crc kubenswrapper[4909]: I1128 16:20:19.911546 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:49 crc kubenswrapper[4909]: I1128 16:20:49.910790 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:20:49 crc kubenswrapper[4909]: I1128 16:20:49.911377 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:49 crc kubenswrapper[4909]: I1128 16:20:49.911420 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 16:20:49 crc kubenswrapper[4909]: I1128 16:20:49.911982 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:20:49 crc kubenswrapper[4909]: I1128 16:20:49.912047 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636" gracePeriod=600
Nov 28 16:20:50 crc kubenswrapper[4909]: I1128 16:20:50.720551 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636" exitCode=0
Nov 28 16:20:50 crc kubenswrapper[4909]: I1128 16:20:50.720623 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636"}
Nov 28 16:20:50 crc kubenswrapper[4909]: I1128 16:20:50.721096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65"}
Nov 28 16:20:50 crc kubenswrapper[4909]: I1128 16:20:50.721115 4909 scope.go:117] "RemoveContainer" containerID="d061b7e6392d6d737369fee3bbd3cb565547cf5b6232329c2831c199654babf4"
Nov 28 16:22:49 crc kubenswrapper[4909]: I1128 16:22:49.170483 4909 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.403505 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2znvn"]
Nov 28 16:23:05 crc kubenswrapper[4909]: E1128 16:23:05.404643 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbee12e6-b82e-4451-8292-dca1540e2ab5" containerName="registry"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.404734 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbee12e6-b82e-4451-8292-dca1540e2ab5" containerName="registry"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.404944 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbee12e6-b82e-4451-8292-dca1540e2ab5" containerName="registry"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.406353 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.420424 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2znvn"]
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.563135 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nfkb\" (UniqueName: \"kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.563207 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.563377 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.664776 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nfkb\" (UniqueName: \"kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.664876 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.664899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.666197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.698759 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.701121 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nfkb\" (UniqueName: \"kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb\") pod \"certified-operators-2znvn\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:05 crc kubenswrapper[4909]: I1128 16:23:05.739755 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2znvn"
Nov 28 16:23:06 crc kubenswrapper[4909]: I1128 16:23:06.085384 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2znvn"]
Nov 28 16:23:06 crc kubenswrapper[4909]: W1128 16:23:06.093979 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57eccbac_0d16_4a79_ad64_3a4c829c956d.slice/crio-5fa84234e8db9aa961b299d46c320e2821b682f26bdedee127cbf247f3d1d83d WatchSource:0}: Error finding container 5fa84234e8db9aa961b299d46c320e2821b682f26bdedee127cbf247f3d1d83d: Status 404 returned error can't find the container with id 5fa84234e8db9aa961b299d46c320e2821b682f26bdedee127cbf247f3d1d83d
Nov 28 16:23:06 crc kubenswrapper[4909]: I1128 16:23:06.546450 4909 generic.go:334] "Generic (PLEG): container finished" podID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerID="b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c" exitCode=0
Nov 28 16:23:06 crc kubenswrapper[4909]: I1128 16:23:06.546490 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerDied","Data":"b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c"}
Nov 28 16:23:06 crc kubenswrapper[4909]: I1128 16:23:06.546514 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerStarted","Data":"5fa84234e8db9aa961b299d46c320e2821b682f26bdedee127cbf247f3d1d83d"}
Nov 28 16:23:06 crc kubenswrapper[4909]: I1128 16:23:06.549257 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:23:09 crc kubenswrapper[4909]: I1128 16:23:09.567375 4909 generic.go:334] "Generic (PLEG): container finished" podID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerID="1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330" exitCode=0
Nov 28 16:23:09 crc kubenswrapper[4909]: I1128 16:23:09.567467 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerDied","Data":"1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330"}
Nov 28
16:23:10 crc kubenswrapper[4909]: I1128 16:23:10.576493 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerStarted","Data":"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512"} Nov 28 16:23:10 crc kubenswrapper[4909]: I1128 16:23:10.593822 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2znvn" podStartSLOduration=1.853198675 podStartE2EDuration="5.593803461s" podCreationTimestamp="2025-11-28 16:23:05 +0000 UTC" firstStartedPulling="2025-11-28 16:23:06.548943448 +0000 UTC m=+768.945627962" lastFinishedPulling="2025-11-28 16:23:10.289548224 +0000 UTC m=+772.686232748" observedRunningTime="2025-11-28 16:23:10.591872729 +0000 UTC m=+772.988557263" watchObservedRunningTime="2025-11-28 16:23:10.593803461 +0000 UTC m=+772.990487995" Nov 28 16:23:15 crc kubenswrapper[4909]: I1128 16:23:15.740709 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:15 crc kubenswrapper[4909]: I1128 16:23:15.741084 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:15 crc kubenswrapper[4909]: I1128 16:23:15.778065 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:16 crc kubenswrapper[4909]: I1128 16:23:16.660368 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:16 crc kubenswrapper[4909]: I1128 16:23:16.703631 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2znvn"] Nov 28 16:23:18 crc kubenswrapper[4909]: I1128 16:23:18.621553 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2znvn" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="registry-server" containerID="cri-o://3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512" gracePeriod=2 Nov 28 16:23:19 crc kubenswrapper[4909]: I1128 16:23:19.911021 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:19 crc kubenswrapper[4909]: I1128 16:23:19.911092 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.022287 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.156594 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities\") pod \"57eccbac-0d16-4a79-ad64-3a4c829c956d\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.156766 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content\") pod \"57eccbac-0d16-4a79-ad64-3a4c829c956d\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.156896 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nfkb\" (UniqueName: \"kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb\") pod \"57eccbac-0d16-4a79-ad64-3a4c829c956d\" (UID: \"57eccbac-0d16-4a79-ad64-3a4c829c956d\") " Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.158448 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities" (OuterVolumeSpecName: "utilities") pod "57eccbac-0d16-4a79-ad64-3a4c829c956d" (UID: "57eccbac-0d16-4a79-ad64-3a4c829c956d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.165508 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb" (OuterVolumeSpecName: "kube-api-access-5nfkb") pod "57eccbac-0d16-4a79-ad64-3a4c829c956d" (UID: "57eccbac-0d16-4a79-ad64-3a4c829c956d"). InnerVolumeSpecName "kube-api-access-5nfkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.228181 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57eccbac-0d16-4a79-ad64-3a4c829c956d" (UID: "57eccbac-0d16-4a79-ad64-3a4c829c956d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.258442 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nfkb\" (UniqueName: \"kubernetes.io/projected/57eccbac-0d16-4a79-ad64-3a4c829c956d-kube-api-access-5nfkb\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.258472 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.258486 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57eccbac-0d16-4a79-ad64-3a4c829c956d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.634515 4909 generic.go:334] "Generic (PLEG): container finished" podID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerID="3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512" exitCode=0 Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.634562 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerDied","Data":"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512"} Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.634580 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2znvn" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.634596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2znvn" event={"ID":"57eccbac-0d16-4a79-ad64-3a4c829c956d","Type":"ContainerDied","Data":"5fa84234e8db9aa961b299d46c320e2821b682f26bdedee127cbf247f3d1d83d"} Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.634613 4909 scope.go:117] "RemoveContainer" containerID="3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.661895 4909 scope.go:117] "RemoveContainer" containerID="1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.663873 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2znvn"] Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.670140 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2znvn"] Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.689744 4909 scope.go:117] "RemoveContainer" containerID="b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.702117 4909 scope.go:117] "RemoveContainer" containerID="3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512" Nov 28 16:23:20 crc kubenswrapper[4909]: E1128 16:23:20.702527 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512\": container with ID starting with 3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512 not found: ID does not exist" containerID="3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.702573 
4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512"} err="failed to get container status \"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512\": rpc error: code = NotFound desc = could not find container \"3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512\": container with ID starting with 3a5328039f0184772f6341332bdb58b584e280958cfcd2b2cfcbf747ebf39512 not found: ID does not exist" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.702601 4909 scope.go:117] "RemoveContainer" containerID="1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330" Nov 28 16:23:20 crc kubenswrapper[4909]: E1128 16:23:20.703235 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330\": container with ID starting with 1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330 not found: ID does not exist" containerID="1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.703274 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330"} err="failed to get container status \"1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330\": rpc error: code = NotFound desc = could not find container \"1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330\": container with ID starting with 1048e801c24246fcfb5bfb33e082e0976f33a4accca6a489729490a518f8b330 not found: ID does not exist" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.703301 4909 scope.go:117] "RemoveContainer" containerID="b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c" Nov 28 16:23:20 crc kubenswrapper[4909]: E1128 16:23:20.703599 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c\": container with ID starting with b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c not found: ID does not exist" containerID="b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c" Nov 28 16:23:20 crc kubenswrapper[4909]: I1128 16:23:20.703635 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c"} err="failed to get container status \"b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c\": rpc error: code = NotFound desc = could not find container \"b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c\": container with ID starting with b56778ed1d2a51ec50eb78ecb08029abd1a8793b9f3ed1c0df23e9e11796717c not found: ID does not exist" Nov 28 16:23:21 crc kubenswrapper[4909]: I1128 16:23:21.907339 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" path="/var/lib/kubelet/pods/57eccbac-0d16-4a79-ad64-3a4c829c956d/volumes" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.246855 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:32 crc kubenswrapper[4909]: E1128 16:23:32.247750 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="registry-server" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.247770 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="registry-server" Nov 28 16:23:32 crc kubenswrapper[4909]: E1128 16:23:32.247790 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="extract-utilities" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.247799 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="extract-utilities" Nov 28 16:23:32 crc kubenswrapper[4909]: E1128 16:23:32.247815 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="extract-content" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.247824 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="extract-content" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.247951 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="57eccbac-0d16-4a79-ad64-3a4c829c956d" containerName="registry-server" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.248614 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.255857 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.406377 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.406440 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.406467 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wnjv\" (UniqueName: \"kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.507981 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.508044 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wnjv\" (UniqueName: \"kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv\") pod \"redhat-operators-q5cjm\" 
(UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.508127 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.508587 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.508677 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.526345 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wnjv\" (UniqueName: \"kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv\") pod \"redhat-operators-q5cjm\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.567564 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:32 crc kubenswrapper[4909]: I1128 16:23:32.749793 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:33 crc kubenswrapper[4909]: I1128 16:23:33.705989 4909 generic.go:334] "Generic (PLEG): container finished" podID="58c771ac-d55b-4445-8d90-ccb68530941d" containerID="463f7c9ecee8a00a38846687a373cf32ac07eddebf430c59f0c118c4d1bc6081" exitCode=0 Nov 28 16:23:33 crc kubenswrapper[4909]: I1128 16:23:33.706041 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerDied","Data":"463f7c9ecee8a00a38846687a373cf32ac07eddebf430c59f0c118c4d1bc6081"} Nov 28 16:23:33 crc kubenswrapper[4909]: I1128 16:23:33.706072 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerStarted","Data":"e94a8caedda80b7d4246ff634d857cc850d98bd3f0cbc4cbf4326246e6edc40d"} Nov 28 16:23:34 crc kubenswrapper[4909]: I1128 16:23:34.712696 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerStarted","Data":"e4acb7787ce2f5025f7da18883f42dd88077d7891c550ff5406ace8fd6a780d2"} Nov 28 16:23:35 crc kubenswrapper[4909]: I1128 16:23:35.719721 4909 generic.go:334] "Generic (PLEG): container finished" podID="58c771ac-d55b-4445-8d90-ccb68530941d" containerID="e4acb7787ce2f5025f7da18883f42dd88077d7891c550ff5406ace8fd6a780d2" exitCode=0 Nov 28 16:23:35 crc kubenswrapper[4909]: I1128 16:23:35.719784 4909 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerDied","Data":"e4acb7787ce2f5025f7da18883f42dd88077d7891c550ff5406ace8fd6a780d2"} Nov 28 16:23:36 crc kubenswrapper[4909]: I1128 16:23:36.728727 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerStarted","Data":"48e93b9e731ca31cc9107ee5fde81fdf675c866437f72c33c8318e4fb377179d"} Nov 28 16:23:36 crc kubenswrapper[4909]: I1128 16:23:36.749179 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q5cjm" podStartSLOduration=2.278634707 podStartE2EDuration="4.749160131s" podCreationTimestamp="2025-11-28 16:23:32 +0000 UTC" firstStartedPulling="2025-11-28 16:23:33.708629722 +0000 UTC m=+796.105314246" lastFinishedPulling="2025-11-28 16:23:36.179155136 +0000 UTC m=+798.575839670" observedRunningTime="2025-11-28 16:23:36.747280561 +0000 UTC m=+799.143965085" watchObservedRunningTime="2025-11-28 16:23:36.749160131 +0000 UTC m=+799.145844655" Nov 28 16:23:42 crc kubenswrapper[4909]: I1128 16:23:42.569369 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:42 crc kubenswrapper[4909]: I1128 16:23:42.569425 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:42 crc kubenswrapper[4909]: I1128 16:23:42.612618 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:42 crc kubenswrapper[4909]: I1128 16:23:42.807954 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:42 crc kubenswrapper[4909]: I1128 16:23:42.848262 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:44 crc kubenswrapper[4909]: I1128 16:23:44.783950 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q5cjm" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="registry-server" containerID="cri-o://48e93b9e731ca31cc9107ee5fde81fdf675c866437f72c33c8318e4fb377179d" gracePeriod=2 Nov 28 16:23:47 crc kubenswrapper[4909]: I1128 16:23:47.813241 4909 generic.go:334] "Generic (PLEG): container finished" podID="58c771ac-d55b-4445-8d90-ccb68530941d" containerID="48e93b9e731ca31cc9107ee5fde81fdf675c866437f72c33c8318e4fb377179d" exitCode=0 Nov 28 16:23:47 crc kubenswrapper[4909]: I1128 16:23:47.813304 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerDied","Data":"48e93b9e731ca31cc9107ee5fde81fdf675c866437f72c33c8318e4fb377179d"} Nov 28 16:23:47 crc kubenswrapper[4909]: I1128 16:23:47.926071 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.034935 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content\") pod \"58c771ac-d55b-4445-8d90-ccb68530941d\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.034988 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wnjv\" (UniqueName: \"kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv\") pod \"58c771ac-d55b-4445-8d90-ccb68530941d\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.035034 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities\") pod \"58c771ac-d55b-4445-8d90-ccb68530941d\" (UID: \"58c771ac-d55b-4445-8d90-ccb68530941d\") " Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.035940 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities" (OuterVolumeSpecName: "utilities") pod "58c771ac-d55b-4445-8d90-ccb68530941d" (UID: "58c771ac-d55b-4445-8d90-ccb68530941d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.040281 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv" (OuterVolumeSpecName: "kube-api-access-2wnjv") pod "58c771ac-d55b-4445-8d90-ccb68530941d" (UID: "58c771ac-d55b-4445-8d90-ccb68530941d"). InnerVolumeSpecName "kube-api-access-2wnjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.136782 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wnjv\" (UniqueName: \"kubernetes.io/projected/58c771ac-d55b-4445-8d90-ccb68530941d-kube-api-access-2wnjv\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.136826 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.165752 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58c771ac-d55b-4445-8d90-ccb68530941d" (UID: "58c771ac-d55b-4445-8d90-ccb68530941d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.237594 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58c771ac-d55b-4445-8d90-ccb68530941d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.820111 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5cjm" event={"ID":"58c771ac-d55b-4445-8d90-ccb68530941d","Type":"ContainerDied","Data":"e94a8caedda80b7d4246ff634d857cc850d98bd3f0cbc4cbf4326246e6edc40d"} Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.820217 4909 scope.go:117] "RemoveContainer" containerID="48e93b9e731ca31cc9107ee5fde81fdf675c866437f72c33c8318e4fb377179d" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.820215 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5cjm" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.840301 4909 scope.go:117] "RemoveContainer" containerID="e4acb7787ce2f5025f7da18883f42dd88077d7891c550ff5406ace8fd6a780d2" Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.880786 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.881169 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q5cjm"] Nov 28 16:23:48 crc kubenswrapper[4909]: I1128 16:23:48.893890 4909 scope.go:117] "RemoveContainer" containerID="463f7c9ecee8a00a38846687a373cf32ac07eddebf430c59f0c118c4d1bc6081" Nov 28 16:23:49 crc kubenswrapper[4909]: I1128 16:23:49.908280 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" path="/var/lib/kubelet/pods/58c771ac-d55b-4445-8d90-ccb68530941d/volumes" Nov 28 16:23:49 crc kubenswrapper[4909]: I1128 16:23:49.910620 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:49 crc kubenswrapper[4909]: I1128 16:23:49.910697 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.975248 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qxw94"] Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976277 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-controller" containerID="cri-o://983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976332 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="nbdb" 
containerID="cri-o://d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976413 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="sbdb" containerID="cri-o://841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976418 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="northd" containerID="cri-o://031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976466 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976487 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-acl-logging" containerID="cri-o://66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" gracePeriod=30 Nov 28 16:24:12 crc kubenswrapper[4909]: I1128 16:24:12.976532 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-node" containerID="cri-o://c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" gracePeriod=30 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.022065 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" containerID="cri-o://8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" gracePeriod=30 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.269787 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/3.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.271740 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovn-acl-logging/0.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.272170 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovn-controller/0.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.272516 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.340578 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7qglw"] Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.340920 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.340949 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.340966 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="sbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.340979 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="sbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.340993 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kubecfg-setup" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341007 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kubecfg-setup" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341023 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="extract-utilities" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341037 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="extract-utilities" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341056 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341067 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341081 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="northd" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341093 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="northd" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341113 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341125 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341138 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341149 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341164 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-acl-logging" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341176 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-acl-logging" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341192 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341203 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341220 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-node" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341232 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-node" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341253 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="registry-server" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341264 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="registry-server" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341281 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341293 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341312 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="extract-content" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341324 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="extract-content" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341335 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="nbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341347 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="nbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341501 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="nbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341522 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341537 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="58c771ac-d55b-4445-8d90-ccb68530941d" containerName="registry-server" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341555 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-acl-logging" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341572 4909 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341590 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341605 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341621 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovn-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341634 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="kube-rbac-proxy-node" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341650 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341693 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="sbdb" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341707 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="northd" Nov 28 16:24:13 crc kubenswrapper[4909]: E1128 16:24:13.341903 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.341918 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.342071 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerName="ovnkube-controller" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.344102 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-ovn\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371408 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovn-node-metrics-cert\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371440 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371470 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-log-socket\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371490 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-systemd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371543 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-config\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371567 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-netd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371596 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-systemd-units\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l55qs\" (UniqueName: \"kubernetes.io/projected/032a1970-4ddb-48d8-87d2-44536c92e7f7-kube-api-access-l55qs\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371633 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-bin\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371677 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371701 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-node-log\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371721 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-netns\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371745 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-var-lib-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371766 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-kubelet\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371784 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-etc-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371800 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-slash\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371818 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-env-overrides\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.371835 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-script-lib\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472268 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472337 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472360 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472400 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472447 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472454 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472480 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472486 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472505 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472511 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472513 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472528 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472552 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbrc9\" (UniqueName: \"kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472581 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472640 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472639 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472678 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472708 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472729 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472751 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472785 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472812 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472837 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472865 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472893 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.472913 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch\") pod \"c17e2fff-c7ee-475c-8c17-58a394744b91\" (UID: \"c17e2fff-c7ee-475c-8c17-58a394744b91\") " Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473055 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-log-socket\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473083 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-systemd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473106 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473125 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473134 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473145 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-config\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473208 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-netd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473252 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-systemd-units\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473274 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l55qs\" (UniqueName: \"kubernetes.io/projected/032a1970-4ddb-48d8-87d2-44536c92e7f7-kube-api-access-l55qs\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473314 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-bin\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473343 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473370 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-node-log\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473400 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-netns\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473446 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-var-lib-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-kubelet\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473513 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-etc-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473544 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-slash\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473574 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-env-overrides\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473604 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-script-lib\") pod 
\"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473635 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-ovn\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473712 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovn-node-metrics-cert\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473743 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473809 4909 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473828 4909 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473846 4909 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473863 4909 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473880 4909 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473896 4909 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473913 4909 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473929 4909 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc 
kubenswrapper[4909]: I1128 16:24:13.473944 4909 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473961 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-config\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.473996 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474023 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474047 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-netd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474055 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log" (OuterVolumeSpecName: "node-log") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474089 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-systemd-units\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474502 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474538 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474566 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash" (OuterVolumeSpecName: "host-slash") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474589 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474612 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket" (OuterVolumeSpecName: "log-socket") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474634 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474707 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-log-socket\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474741 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-systemd\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.474958 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475007 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-etc-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475012 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-cni-bin\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475045 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475082 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-run-netns\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475094 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-node-log\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475115 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-var-lib-openvswitch\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475148 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-kubelet\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475185 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-host-slash\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.475269 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/032a1970-4ddb-48d8-87d2-44536c92e7f7-run-ovn\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.476593 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-env-overrides\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.478011 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovnkube-script-lib\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.478230 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9" (OuterVolumeSpecName: "kube-api-access-zbrc9") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "kube-api-access-zbrc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.478637 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.479274 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/032a1970-4ddb-48d8-87d2-44536c92e7f7-ovn-node-metrics-cert\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.487920 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "c17e2fff-c7ee-475c-8c17-58a394744b91" (UID: "c17e2fff-c7ee-475c-8c17-58a394744b91"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.502680 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l55qs\" (UniqueName: \"kubernetes.io/projected/032a1970-4ddb-48d8-87d2-44536c92e7f7-kube-api-access-l55qs\") pod \"ovnkube-node-7qglw\" (UID: \"032a1970-4ddb-48d8-87d2-44536c92e7f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575263 4909 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575311 4909 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575325 4909 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-node-log\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575337 4909 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c17e2fff-c7ee-475c-8c17-58a394744b91-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575350 4909 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575361 4909 
reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-slash\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575372 4909 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575383 4909 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-log-socket\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575394 4909 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c17e2fff-c7ee-475c-8c17-58a394744b91-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575408 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c17e2fff-c7ee-475c-8c17-58a394744b91-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.575421 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbrc9\" (UniqueName: \"kubernetes.io/projected/c17e2fff-c7ee-475c-8c17-58a394744b91-kube-api-access-zbrc9\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.657072 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:13 crc kubenswrapper[4909]: W1128 16:24:13.687731 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod032a1970_4ddb_48d8_87d2_44536c92e7f7.slice/crio-bf2bdf2ace5b2d2cd7b5bd4ef3da375356135c0e4adcd179b59636608a7ffc8f WatchSource:0}: Error finding container bf2bdf2ace5b2d2cd7b5bd4ef3da375356135c0e4adcd179b59636608a7ffc8f: Status 404 returned error can't find the container with id bf2bdf2ace5b2d2cd7b5bd4ef3da375356135c0e4adcd179b59636608a7ffc8f Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.964918 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovnkube-controller/3.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.968101 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovn-acl-logging/0.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.968774 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qxw94_c17e2fff-c7ee-475c-8c17-58a394744b91/ovn-controller/0.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969258 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969298 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969311 4909 
generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969321 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969329 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969338 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969348 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" exitCode=143 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969369 4909 generic.go:334] "Generic (PLEG): container finished" podID="c17e2fff-c7ee-475c-8c17-58a394744b91" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" exitCode=143 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969349 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969371 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969528 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969548 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969567 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969582 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969611 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" 
event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969626 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969639 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969647 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969613 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969666 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969676 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969683 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969690 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969697 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969704 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969714 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969725 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969733 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969739 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969748 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969755 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969761 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969769 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969776 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969791 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969798 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969809 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969820 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969829 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969837 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969846 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969853 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969859 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969866 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969872 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969879 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969886 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969896 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qxw94" event={"ID":"c17e2fff-c7ee-475c-8c17-58a394744b91","Type":"ContainerDied","Data":"483b860a481510c4981596c62db5b70137ac2db168b668b14bc7b85e6ded95e3"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969907 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969915 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969922 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969929 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969936 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969942 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969950 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969957 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969964 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.969970 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.972404 4909 generic.go:334] "Generic (PLEG): container finished" podID="032a1970-4ddb-48d8-87d2-44536c92e7f7" containerID="959a2c4af92f34a18454277e8c0c1166e555ea320bbaddae9a73923046316aea" exitCode=0 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.972522 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerDied","Data":"959a2c4af92f34a18454277e8c0c1166e555ea320bbaddae9a73923046316aea"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.972560 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"bf2bdf2ace5b2d2cd7b5bd4ef3da375356135c0e4adcd179b59636608a7ffc8f"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.975134 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/2.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.975625 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/1.log" Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.975678 4909 generic.go:334] "Generic (PLEG): container finished" podID="6e3805b2-8ad3-4fa6-b88f-e0ae42294202" containerID="ff55a4f30c31edf7245bab6cba501e9e5bde33dd3277e5a9e39f85eb66c216aa" exitCode=2 Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.975710 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerDied","Data":"ff55a4f30c31edf7245bab6cba501e9e5bde33dd3277e5a9e39f85eb66c216aa"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.975730 4909 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a"} Nov 28 16:24:13 crc kubenswrapper[4909]: I1128 16:24:13.976438 4909 scope.go:117] "RemoveContainer" containerID="ff55a4f30c31edf7245bab6cba501e9e5bde33dd3277e5a9e39f85eb66c216aa" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.001270 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.037777 4909 scope.go:117] "RemoveContainer" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.048488 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qxw94"] Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.065355 4909 scope.go:117] "RemoveContainer" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.069254 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qxw94"] Nov 28 16:24:14 crc 
kubenswrapper[4909]: I1128 16:24:14.078366 4909 scope.go:117] "RemoveContainer" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.092139 4909 scope.go:117] "RemoveContainer" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.135045 4909 scope.go:117] "RemoveContainer" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.149094 4909 scope.go:117] "RemoveContainer" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.179707 4909 scope.go:117] "RemoveContainer" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.204853 4909 scope.go:117] "RemoveContainer" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.220824 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.221628 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.221684 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} err="failed to get container status \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.221712 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.222424 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": container with ID starting with 3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162 not found: ID does not exist" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.222478 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} err="failed to get container status \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": rpc error: code = NotFound desc = could not find container \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": container with ID starting with 3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162 not found: ID does not exist" Nov 28 16:24:14 crc 
kubenswrapper[4909]: I1128 16:24:14.222510 4909 scope.go:117] "RemoveContainer" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.228288 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": container with ID starting with 841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6 not found: ID does not exist" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.228336 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} err="failed to get container status \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": rpc error: code = NotFound desc = could not find container \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": container with ID starting with 841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.228366 4909 scope.go:117] "RemoveContainer" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.232260 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": container with ID starting with d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e not found: ID does not exist" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.232300 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} err="failed to get container status \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": rpc error: code = NotFound desc = could not find container \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": container with ID starting with d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.232323 4909 scope.go:117] "RemoveContainer" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.232641 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": container with ID starting with 031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e not found: ID does not exist" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.232702 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} err="failed to get container status \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": rpc error: code = NotFound desc = could not find container 
\"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": container with ID starting with 031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.232733 4909 scope.go:117] "RemoveContainer" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.233270 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": container with ID starting with 697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba not found: ID does not exist" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.233307 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} err="failed to get container status \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": rpc error: code = NotFound desc = could not find container \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": container with ID starting with 697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.233322 4909 scope.go:117] "RemoveContainer" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.234278 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": container with ID starting with c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4 not found: ID does not exist" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.234320 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} err="failed to get container status \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": rpc error: code = NotFound desc = could not find container \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": container with ID starting with c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.234349 4909 scope.go:117] "RemoveContainer" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.234946 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": container with ID starting with 66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7 not found: ID does not exist" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.234974 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} 
err="failed to get container status \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": rpc error: code = NotFound desc = could not find container \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": container with ID starting with 66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.234989 4909 scope.go:117] "RemoveContainer" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.235389 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": container with ID starting with 983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef not found: ID does not exist" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.235411 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} err="failed to get container status \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": rpc error: code = NotFound desc = could not find container \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": container with ID starting with 983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.235423 4909 scope.go:117] "RemoveContainer" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: E1128 16:24:14.235761 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": container with ID starting with e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef not found: ID does not exist" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.235780 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} err="failed to get container status \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": rpc error: code = NotFound desc = could not find container \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": container with ID starting with e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.235793 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.236125 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} err="failed to get container status \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 
8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.236145 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.236810 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} err="failed to get container status \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": rpc error: code = NotFound desc = could not find container \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": container with ID starting with 3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.236834 4909 scope.go:117] "RemoveContainer" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.237306 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} err="failed to get container status \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": rpc error: code = NotFound desc = could not find container \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": container with ID starting with 841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.237336 4909 scope.go:117] "RemoveContainer" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.238010 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} err="failed to get container status \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": rpc error: code = NotFound desc = could not find container \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": container with ID starting with d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.238040 4909 scope.go:117] "RemoveContainer" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.238669 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} err="failed to get container status \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": rpc error: code = NotFound desc = could not find container \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": container with ID starting with 031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.238707 4909 scope.go:117] "RemoveContainer" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239086 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} err="failed to get container status \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": rpc error: code = NotFound desc = could not find container \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": container with ID starting with 697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239158 4909 scope.go:117] "RemoveContainer" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239684 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} err="failed to get container status \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": rpc error: code = NotFound desc = could not find container \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": container with ID starting with c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239704 4909 scope.go:117] "RemoveContainer" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239950 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} err="failed to get container status \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": rpc error: code = NotFound desc = could not find container \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": container with ID starting with 66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.239964 4909 scope.go:117] "RemoveContainer" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.240380 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} err="failed to get container status \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": rpc error: code = NotFound desc = could not find container \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": container with ID starting with 983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.240398 4909 scope.go:117] "RemoveContainer" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.240680 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} err="failed to get container status \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": rpc error: code = NotFound desc = could not find container \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": container with ID starting with e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef not found: ID does not exist" Nov 
28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.240712 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241090 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} err="failed to get container status \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241109 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241406 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} err="failed to get container status \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": rpc error: code = NotFound desc = could not find container \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": container with ID starting with 3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241428 4909 scope.go:117] "RemoveContainer" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241702 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} err="failed to get container status \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": rpc error: code = NotFound desc = could not find container \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": container with ID starting with 841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.241731 4909 scope.go:117] "RemoveContainer" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.242054 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} err="failed to get container status \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": rpc error: code = NotFound desc = could not find container \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": container with ID starting with d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.242085 4909 scope.go:117] "RemoveContainer" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.242390 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} err="failed to get container status 
\"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": rpc error: code = NotFound desc = could not find container \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": container with ID starting with 031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.242415 4909 scope.go:117] "RemoveContainer" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.244432 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} err="failed to get container status \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": rpc error: code = NotFound desc = could not find container \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": container with ID starting with 697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.244459 4909 scope.go:117] "RemoveContainer" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246010 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} err="failed to get container status \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": rpc error: code = NotFound desc = could not find container \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": container with ID starting with c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246033 4909 scope.go:117] "RemoveContainer" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246405 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} err="failed to get container status \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": rpc error: code = NotFound desc = could not find container \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": container with ID starting with 66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246438 4909 scope.go:117] "RemoveContainer" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246700 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} err="failed to get container status \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": rpc error: code = NotFound desc = could not find container \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": container with ID starting with 983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.246724 4909 scope.go:117] "RemoveContainer" 
containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247106 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} err="failed to get container status \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": rpc error: code = NotFound desc = could not find container \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": container with ID starting with e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247158 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247425 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} err="failed to get container status \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247444 4909 scope.go:117] "RemoveContainer" containerID="3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247647 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162"} err="failed to get container status \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": rpc error: code = NotFound desc = could not find container \"3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162\": container with ID starting with 3ddca078c58f2bc3df2f19603ac7190dde988c7fe8b49ca5d94af1bf01cd1162 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.247691 4909 scope.go:117] "RemoveContainer" containerID="841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.248215 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6"} err="failed to get container status \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": rpc error: code = NotFound desc = could not find container \"841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6\": container with ID starting with 841b67fe2e8ae0e4ebf834f141b83ad5e850b8e81c3f6410cef7df9894c33dc6 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.248249 4909 scope.go:117] "RemoveContainer" containerID="d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.248624 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e"} err="failed to get container status \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": rpc error: code = NotFound desc = could not find 
container \"d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e\": container with ID starting with d0b87eb388aab62c94756b5f20b73cf95fc80da8aa3d83f2848757f860cdd38e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.248669 4909 scope.go:117] "RemoveContainer" containerID="031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249022 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e"} err="failed to get container status \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": rpc error: code = NotFound desc = could not find container \"031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e\": container with ID starting with 031c30950c06bdcae1736062069b96a6ce7cb66d5f286b73d691ee2d2db5832e not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249044 4909 scope.go:117] "RemoveContainer" containerID="697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249365 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba"} err="failed to get container status \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": rpc error: code = NotFound desc = could not find container \"697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba\": container with ID starting with 697293d217dd4fbc4cecc3bc4be7acc14dbb5aa97b9faf0a608792ef7e4c8dba not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249386 4909 scope.go:117] "RemoveContainer" containerID="c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249696 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4"} err="failed to get container status \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": rpc error: code = NotFound desc = could not find container \"c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4\": container with ID starting with c9f22dca75c10791e7f5e083e737d4d6aad20598ff0d29dd7562fa99b5dbdfd4 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249719 4909 scope.go:117] "RemoveContainer" containerID="66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.249989 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7"} err="failed to get container status \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": rpc error: code = NotFound desc = could not find container \"66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7\": container with ID starting with 66731a2e0518dd575f621ddf9e760e844e647a224138957440ccaecad3e4b8b7 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250004 4909 scope.go:117] "RemoveContainer" containerID="983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250220 4909 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef"} err="failed to get container status \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": rpc error: code = NotFound desc = could not find container \"983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef\": container with ID starting with 983e9f73b8e344dbe445f95436cbe143d63f1da2e868201b441496df8cdc99ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250240 4909 scope.go:117] "RemoveContainer" containerID="e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250468 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef"} err="failed to get container status \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": rpc error: code = NotFound desc = could not find container \"e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef\": container with ID starting with e1fffda4b5571ab3fe797073c7201feadb7818eac66f6dc76b7761ef13a8d0ef not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250488 4909 scope.go:117] "RemoveContainer" containerID="8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.250689 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1"} err="failed to get container status \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": rpc error: code = NotFound desc = could not find container \"8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1\": container with ID starting with 8dfb42d6b85ca4de9c79dd23da69ea50faaaca28d01cf5b666fcebc2058e7cf1 not found: ID does not exist" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.984247 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/2.log" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.985028 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/1.log" Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.985136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wx2jj" event={"ID":"6e3805b2-8ad3-4fa6-b88f-e0ae42294202","Type":"ContainerStarted","Data":"4e10650868a7f42553589369dd8945a373365f16bcda6dcc41272b905582b90f"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991533 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"5697dcb75c63761be0a5719a50e3f1f954311c178ec321b6f3f07bfad812d0fb"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991567 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"d9e61f58863bc6714917e63839c35e9d2c76738c2f1607664802ebeb0409175a"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991578 4909 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"90c0d43f108f8c614916a6549a654c7100c8f9dd079da500c6ae2284d72ccbef"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991587 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"9c7f2fdd745a088f9829dd512342a09f9772b6d84a5d03ca841a21ed816dfe8f"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991597 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"a73c2c42a9bf44da0a439d85637ee61d120baf9a4bd57599054ddbe60353aced"} Nov 28 16:24:14 crc kubenswrapper[4909]: I1128 16:24:14.991605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"be138d91e29a7617415b768b1e48add12e8721a52f551b966c45756aeb04e2dc"} Nov 28 16:24:15 crc kubenswrapper[4909]: I1128 16:24:15.909544 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c17e2fff-c7ee-475c-8c17-58a394744b91" path="/var/lib/kubelet/pods/c17e2fff-c7ee-475c-8c17-58a394744b91/volumes" Nov 28 16:24:17 crc kubenswrapper[4909]: I1128 16:24:17.005423 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"43ec553c7d77e6f6b221188c80e76d69ec9e6888734fed648d2923251ec733e0"} Nov 28 16:24:18 crc kubenswrapper[4909]: I1128 16:24:18.266092 4909 scope.go:117] "RemoveContainer" containerID="e3a9a82264968374209ed690b43cf96557d426af065ba14cd189ae9e31ed0f0a" Nov 28 16:24:19 crc kubenswrapper[4909]: I1128 16:24:19.019895 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wx2jj_6e3805b2-8ad3-4fa6-b88f-e0ae42294202/kube-multus/2.log" Nov 28 16:24:19 crc kubenswrapper[4909]: I1128 16:24:19.910426 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:24:19 crc kubenswrapper[4909]: I1128 16:24:19.910726 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:24:19 crc kubenswrapper[4909]: I1128 16:24:19.911566 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:24:19 crc kubenswrapper[4909]: I1128 16:24:19.912559 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:24:19 crc 
kubenswrapper[4909]: I1128 16:24:19.912678 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65" gracePeriod=600 Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.033079 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" event={"ID":"032a1970-4ddb-48d8-87d2-44536c92e7f7","Type":"ContainerStarted","Data":"2886a9c1a1ccbdd19f9662bc8f83ef541d6ecc33a8aff52f079da41c79c69240"} Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.033367 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.033466 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.033522 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.060118 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.066250 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" podStartSLOduration=7.066226683 podStartE2EDuration="7.066226683s" podCreationTimestamp="2025-11-28 16:24:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:24:20.062068353 +0000 UTC m=+842.458752877" watchObservedRunningTime="2025-11-28 16:24:20.066226683 +0000 UTC m=+842.462911207" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.068511 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.899412 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-rhqg8"] Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.900555 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.902893 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.903039 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.903530 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.905838 4909 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-29stw" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.911223 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-rhqg8"] Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.969666 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd5st\" (UniqueName: \"kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.969737 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:20 crc kubenswrapper[4909]: I1128 16:24:20.969758 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.071471 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.071514 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.071588 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd5st\" (UniqueName: \"kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.071801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " 
pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.072977 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.088707 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd5st\" (UniqueName: \"kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st\") pod \"crc-storage-crc-rhqg8\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:21 crc kubenswrapper[4909]: I1128 16:24:21.218741 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:22 crc kubenswrapper[4909]: E1128 16:24:22.262025 4909 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-rhqg8_crc-storage_a3224e96-5227-4540-b352-e9a86d53e4fd_0(1d40af010fedd9343b37f1b14af403246707b0233f1805014cf5b08401ac8b9d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 16:24:22 crc kubenswrapper[4909]: E1128 16:24:22.262540 4909 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-rhqg8_crc-storage_a3224e96-5227-4540-b352-e9a86d53e4fd_0(1d40af010fedd9343b37f1b14af403246707b0233f1805014cf5b08401ac8b9d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:22 crc kubenswrapper[4909]: E1128 16:24:22.262564 4909 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-rhqg8_crc-storage_a3224e96-5227-4540-b352-e9a86d53e4fd_0(1d40af010fedd9343b37f1b14af403246707b0233f1805014cf5b08401ac8b9d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:22 crc kubenswrapper[4909]: E1128 16:24:22.262606 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-rhqg8_crc-storage(a3224e96-5227-4540-b352-e9a86d53e4fd)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-rhqg8_crc-storage(a3224e96-5227-4540-b352-e9a86d53e4fd)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-rhqg8_crc-storage_a3224e96-5227-4540-b352-e9a86d53e4fd_0(1d40af010fedd9343b37f1b14af403246707b0233f1805014cf5b08401ac8b9d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-rhqg8" podUID="a3224e96-5227-4540-b352-e9a86d53e4fd" Nov 28 16:24:23 crc kubenswrapper[4909]: I1128 16:24:23.046933 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:23 crc kubenswrapper[4909]: I1128 16:24:23.047433 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:23 crc kubenswrapper[4909]: I1128 16:24:23.211108 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-rhqg8"] Nov 28 16:24:24 crc kubenswrapper[4909]: I1128 16:24:24.055008 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-rhqg8" event={"ID":"a3224e96-5227-4540-b352-e9a86d53e4fd","Type":"ContainerStarted","Data":"0bf5ad389ac6baf5f4adc52ca7e65a9bc9d5cfcb10834531aaf8d8a011b0e022"} Nov 28 16:24:25 crc kubenswrapper[4909]: I1128 16:24:25.061775 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65" exitCode=0 Nov 28 16:24:25 crc kubenswrapper[4909]: I1128 16:24:25.061857 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65"} Nov 28 16:24:25 crc kubenswrapper[4909]: I1128 16:24:25.062234 4909 scope.go:117] "RemoveContainer" containerID="f11c9461d8b182600e549ffbcf770617ff5031ffd03f68d1eee82d8afc48a636" Nov 28 16:24:26 crc kubenswrapper[4909]: I1128 16:24:26.068854 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e"} Nov 28 16:24:27 crc kubenswrapper[4909]: I1128 16:24:27.078764 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3224e96-5227-4540-b352-e9a86d53e4fd" containerID="ce7bf5c13a3546d4e9feaa6589935c33668c6d1380e6d9b3e3ccc6cce544ac80" exitCode=0 Nov 28 16:24:27 crc kubenswrapper[4909]: I1128 16:24:27.079031 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-rhqg8" event={"ID":"a3224e96-5227-4540-b352-e9a86d53e4fd","Type":"ContainerDied","Data":"ce7bf5c13a3546d4e9feaa6589935c33668c6d1380e6d9b3e3ccc6cce544ac80"} Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.361066 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.467446 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd5st\" (UniqueName: \"kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st\") pod \"a3224e96-5227-4540-b352-e9a86d53e4fd\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.467518 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage\") pod \"a3224e96-5227-4540-b352-e9a86d53e4fd\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.467567 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt\") pod \"a3224e96-5227-4540-b352-e9a86d53e4fd\" (UID: \"a3224e96-5227-4540-b352-e9a86d53e4fd\") " Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.467741 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "a3224e96-5227-4540-b352-e9a86d53e4fd" (UID: "a3224e96-5227-4540-b352-e9a86d53e4fd"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.473694 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st" (OuterVolumeSpecName: "kube-api-access-pd5st") pod "a3224e96-5227-4540-b352-e9a86d53e4fd" (UID: "a3224e96-5227-4540-b352-e9a86d53e4fd"). InnerVolumeSpecName "kube-api-access-pd5st". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.485371 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "a3224e96-5227-4540-b352-e9a86d53e4fd" (UID: "a3224e96-5227-4540-b352-e9a86d53e4fd"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.568930 4909 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a3224e96-5227-4540-b352-e9a86d53e4fd-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.569353 4909 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a3224e96-5227-4540-b352-e9a86d53e4fd-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:28 crc kubenswrapper[4909]: I1128 16:24:28.569417 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd5st\" (UniqueName: \"kubernetes.io/projected/a3224e96-5227-4540-b352-e9a86d53e4fd-kube-api-access-pd5st\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:29 crc kubenswrapper[4909]: I1128 16:24:29.094081 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-rhqg8" event={"ID":"a3224e96-5227-4540-b352-e9a86d53e4fd","Type":"ContainerDied","Data":"0bf5ad389ac6baf5f4adc52ca7e65a9bc9d5cfcb10834531aaf8d8a011b0e022"} Nov 28 16:24:29 crc kubenswrapper[4909]: I1128 16:24:29.094606 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bf5ad389ac6baf5f4adc52ca7e65a9bc9d5cfcb10834531aaf8d8a011b0e022" Nov 28 16:24:29 crc kubenswrapper[4909]: I1128 16:24:29.094133 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-rhqg8" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.425562 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh"] Nov 28 16:24:35 crc kubenswrapper[4909]: E1128 16:24:35.427404 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3224e96-5227-4540-b352-e9a86d53e4fd" containerName="storage" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.427476 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3224e96-5227-4540-b352-e9a86d53e4fd" containerName="storage" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.427622 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3224e96-5227-4540-b352-e9a86d53e4fd" containerName="storage" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.428432 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.432947 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.441071 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh"] Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.466277 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.466359 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.466381 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44dqx\" (UniqueName: \"kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.567241 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.567309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44dqx\" (UniqueName: \"kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.567384 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.567923 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.567938 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.601943 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44dqx\" (UniqueName: \"kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.746186 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:35 crc kubenswrapper[4909]: I1128 16:24:35.929281 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh"] Nov 28 16:24:36 crc kubenswrapper[4909]: I1128 16:24:36.132188 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" event={"ID":"d0a72a3a-dca8-4920-a505-fe8bed189e5e","Type":"ContainerStarted","Data":"7bfa9e5f083f673479e717fbc15d5ed5261179843ebad052565fdf03e95cfa08"} Nov 28 16:24:37 crc kubenswrapper[4909]: I1128 16:24:37.140015 4909 generic.go:334] "Generic (PLEG): container finished" podID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerID="27bbcd1e964a6e32b4ed94fa0d4ec41d05df523a3656130e6d79f99f7b284e65" exitCode=0 Nov 28 16:24:37 crc kubenswrapper[4909]: I1128 16:24:37.140304 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" event={"ID":"d0a72a3a-dca8-4920-a505-fe8bed189e5e","Type":"ContainerDied","Data":"27bbcd1e964a6e32b4ed94fa0d4ec41d05df523a3656130e6d79f99f7b284e65"} Nov 28 16:24:39 crc kubenswrapper[4909]: I1128 16:24:39.153173 4909 generic.go:334] "Generic (PLEG): container finished" podID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerID="11e3d08579168cc3537c5346d9d5603c12d49d2d550635112890e100e9e628e1" exitCode=0 Nov 28 16:24:39 crc kubenswrapper[4909]: I1128 16:24:39.153262 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" event={"ID":"d0a72a3a-dca8-4920-a505-fe8bed189e5e","Type":"ContainerDied","Data":"11e3d08579168cc3537c5346d9d5603c12d49d2d550635112890e100e9e628e1"} Nov 28 16:24:40 crc kubenswrapper[4909]: I1128 16:24:40.164440 4909 generic.go:334] "Generic (PLEG): container finished" podID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerID="364a57fbdf1b2e5f24a0ee2b5957d329093b7be4cfaa793aec08c9d6a7b2e51e" exitCode=0 Nov 28 16:24:40 crc kubenswrapper[4909]: I1128 
16:24:40.164539 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" event={"ID":"d0a72a3a-dca8-4920-a505-fe8bed189e5e","Type":"ContainerDied","Data":"364a57fbdf1b2e5f24a0ee2b5957d329093b7be4cfaa793aec08c9d6a7b2e51e"} Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.405491 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.450726 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util\") pod \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.450907 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle\") pod \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.451002 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44dqx\" (UniqueName: \"kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx\") pod \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\" (UID: \"d0a72a3a-dca8-4920-a505-fe8bed189e5e\") " Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.451439 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle" (OuterVolumeSpecName: "bundle") pod "d0a72a3a-dca8-4920-a505-fe8bed189e5e" (UID: "d0a72a3a-dca8-4920-a505-fe8bed189e5e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.462946 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx" (OuterVolumeSpecName: "kube-api-access-44dqx") pod "d0a72a3a-dca8-4920-a505-fe8bed189e5e" (UID: "d0a72a3a-dca8-4920-a505-fe8bed189e5e"). InnerVolumeSpecName "kube-api-access-44dqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.552140 4909 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.552177 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44dqx\" (UniqueName: \"kubernetes.io/projected/d0a72a3a-dca8-4920-a505-fe8bed189e5e-kube-api-access-44dqx\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.682819 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util" (OuterVolumeSpecName: "util") pod "d0a72a3a-dca8-4920-a505-fe8bed189e5e" (UID: "d0a72a3a-dca8-4920-a505-fe8bed189e5e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:41 crc kubenswrapper[4909]: I1128 16:24:41.756044 4909 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a72a3a-dca8-4920-a505-fe8bed189e5e-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:42 crc kubenswrapper[4909]: I1128 16:24:42.182419 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" event={"ID":"d0a72a3a-dca8-4920-a505-fe8bed189e5e","Type":"ContainerDied","Data":"7bfa9e5f083f673479e717fbc15d5ed5261179843ebad052565fdf03e95cfa08"} Nov 28 16:24:42 crc kubenswrapper[4909]: I1128 16:24:42.182465 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bfa9e5f083f673479e717fbc15d5ed5261179843ebad052565fdf03e95cfa08" Nov 28 16:24:42 crc kubenswrapper[4909]: I1128 16:24:42.182535 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh" Nov 28 16:24:43 crc kubenswrapper[4909]: I1128 16:24:43.683468 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7qglw" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.472110 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7"] Nov 28 16:24:44 crc kubenswrapper[4909]: E1128 16:24:44.472563 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="pull" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.472591 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="pull" Nov 28 16:24:44 crc kubenswrapper[4909]: E1128 16:24:44.472629 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="util" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.472639 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="util" Nov 28 16:24:44 crc kubenswrapper[4909]: E1128 16:24:44.472703 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="extract" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.472713 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="extract" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.472934 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a72a3a-dca8-4920-a505-fe8bed189e5e" containerName="extract" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.473447 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.478395 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-5xgqf" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.479975 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.482852 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.492404 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7"] Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.593128 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zc7c\" (UniqueName: \"kubernetes.io/projected/8b711a0a-46e9-4bee-b916-2333b5800592-kube-api-access-8zc7c\") pod \"nmstate-operator-5b5b58f5c8-vs6g7\" (UID: \"8b711a0a-46e9-4bee-b916-2333b5800592\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.694507 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zc7c\" (UniqueName: \"kubernetes.io/projected/8b711a0a-46e9-4bee-b916-2333b5800592-kube-api-access-8zc7c\") pod \"nmstate-operator-5b5b58f5c8-vs6g7\" (UID: \"8b711a0a-46e9-4bee-b916-2333b5800592\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.726688 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zc7c\" (UniqueName: \"kubernetes.io/projected/8b711a0a-46e9-4bee-b916-2333b5800592-kube-api-access-8zc7c\") pod \"nmstate-operator-5b5b58f5c8-vs6g7\" (UID: \"8b711a0a-46e9-4bee-b916-2333b5800592\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" Nov 28 16:24:44 crc kubenswrapper[4909]: I1128 16:24:44.798531 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" Nov 28 16:24:45 crc kubenswrapper[4909]: I1128 16:24:45.202191 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7"] Nov 28 16:24:45 crc kubenswrapper[4909]: W1128 16:24:45.205072 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b711a0a_46e9_4bee_b916_2333b5800592.slice/crio-4b5c53093910c59bc25e026cef4aff067fc834251641122d0ab31542e77c5913 WatchSource:0}: Error finding container 4b5c53093910c59bc25e026cef4aff067fc834251641122d0ab31542e77c5913: Status 404 returned error can't find the container with id 4b5c53093910c59bc25e026cef4aff067fc834251641122d0ab31542e77c5913 Nov 28 16:24:46 crc kubenswrapper[4909]: I1128 16:24:46.202564 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" event={"ID":"8b711a0a-46e9-4bee-b916-2333b5800592","Type":"ContainerStarted","Data":"4b5c53093910c59bc25e026cef4aff067fc834251641122d0ab31542e77c5913"} Nov 28 16:24:48 crc kubenswrapper[4909]: I1128 16:24:48.213857 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" event={"ID":"8b711a0a-46e9-4bee-b916-2333b5800592","Type":"ContainerStarted","Data":"67a9a3cf158b4a4297f0bfdc2944585252be8ee3d9d9e8127affc78f1c12f778"} Nov 28 16:24:48 crc kubenswrapper[4909]: I1128 16:24:48.232130 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-vs6g7" podStartSLOduration=1.9776113400000002 podStartE2EDuration="4.232112141s" podCreationTimestamp="2025-11-28 16:24:44 +0000 UTC" firstStartedPulling="2025-11-28 16:24:45.2080496 +0000 UTC m=+867.604734134" lastFinishedPulling="2025-11-28 16:24:47.462550411 +0000 UTC m=+869.859234935" observedRunningTime="2025-11-28 16:24:48.227928939 +0000 UTC m=+870.624613473" watchObservedRunningTime="2025-11-28 16:24:48.232112141 +0000 UTC m=+870.628796675" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.628978 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.630116 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.632373 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-s9wpx" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.642797 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.645962 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh7jl\" (UniqueName: \"kubernetes.io/projected/1aaa9676-48ce-4f8e-af9d-f683a8432515-kube-api-access-jh7jl\") pod \"nmstate-metrics-7f946cbc9-t2dgs\" (UID: \"1aaa9676-48ce-4f8e-af9d-f683a8432515\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.650554 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.651224 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.652793 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.659688 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-bk9bk"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.660468 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.672778 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.747212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh7jl\" (UniqueName: \"kubernetes.io/projected/1aaa9676-48ce-4f8e-af9d-f683a8432515-kube-api-access-jh7jl\") pod \"nmstate-metrics-7f946cbc9-t2dgs\" (UID: \"1aaa9676-48ce-4f8e-af9d-f683a8432515\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.766928 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh7jl\" (UniqueName: \"kubernetes.io/projected/1aaa9676-48ce-4f8e-af9d-f683a8432515-kube-api-access-jh7jl\") pod \"nmstate-metrics-7f946cbc9-t2dgs\" (UID: \"1aaa9676-48ce-4f8e-af9d-f683a8432515\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.781645 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.782312 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.785861 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.786157 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-hhxdz" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.786287 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.796133 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849132 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849211 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhkxr\" (UniqueName: \"kubernetes.io/projected/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-kube-api-access-rhkxr\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849252 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/622cee50-946d-447b-b403-dba09b89346c-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849279 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-nmstate-lock\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-dbus-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849379 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtgj6\" (UniqueName: \"kubernetes.io/projected/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-kube-api-access-gtgj6\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849394 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78x49\" (UniqueName: 
\"kubernetes.io/projected/622cee50-946d-447b-b403-dba09b89346c-kube-api-access-78x49\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849517 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/622cee50-946d-447b-b403-dba09b89346c-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.849559 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-ovs-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950343 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhkxr\" (UniqueName: \"kubernetes.io/projected/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-kube-api-access-rhkxr\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950403 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/622cee50-946d-447b-b403-dba09b89346c-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950435 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-nmstate-lock\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950466 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-dbus-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950492 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtgj6\" (UniqueName: \"kubernetes.io/projected/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-kube-api-access-gtgj6\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950515 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78x49\" (UniqueName: \"kubernetes.io/projected/622cee50-946d-447b-b403-dba09b89346c-kube-api-access-78x49\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 
16:24:55.950547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-nmstate-lock\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950562 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/622cee50-946d-447b-b403-dba09b89346c-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950810 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-ovs-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950900 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-dbus-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950907 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.950893 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-ovs-socket\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.951576 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/622cee50-946d-447b-b403-dba09b89346c-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.954830 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.955154 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5c5d6cd8f5-kfv52"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.955984 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.956314 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.958382 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/622cee50-946d-447b-b403-dba09b89346c-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.978277 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5c5d6cd8f5-kfv52"] Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.990286 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhkxr\" (UniqueName: \"kubernetes.io/projected/7c2c98c8-3835-4a16-b57b-6a67e455b7e3-kube-api-access-rhkxr\") pod \"nmstate-handler-bk9bk\" (UID: \"7c2c98c8-3835-4a16-b57b-6a67e455b7e3\") " pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:55 crc kubenswrapper[4909]: I1128 16:24:55.991471 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78x49\" (UniqueName: \"kubernetes.io/projected/622cee50-946d-447b-b403-dba09b89346c-kube-api-access-78x49\") pod \"nmstate-console-plugin-7fbb5f6569-pfpfn\" (UID: \"622cee50-946d-447b-b403-dba09b89346c\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.001822 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.029727 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtgj6\" (UniqueName: \"kubernetes.io/projected/bfaa4c0e-b2c7-4768-b261-87a0ef5696d6-kube-api-access-gtgj6\") pod \"nmstate-webhook-5f6d4c5ccb-9m5xv\" (UID: \"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.096264 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156418 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-oauth-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156466 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-oauth-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156513 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156554 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-trusted-ca-bundle\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156574 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156599 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkdbb\" (UniqueName: \"kubernetes.io/projected/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-kube-api-access-zkdbb\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.156621 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-service-ca\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.214830 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs"] Nov 28 16:24:56 crc kubenswrapper[4909]: W1128 16:24:56.227770 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1aaa9676_48ce_4f8e_af9d_f683a8432515.slice/crio-f67b05c602b6a94c71f1e7145291d1e2965917ff88b89598b16203db1b64bcb6 WatchSource:0}: Error finding container 
f67b05c602b6a94c71f1e7145291d1e2965917ff88b89598b16203db1b64bcb6: Status 404 returned error can't find the container with id f67b05c602b6a94c71f1e7145291d1e2965917ff88b89598b16203db1b64bcb6 Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257305 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-oauth-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257343 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-oauth-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257383 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257408 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-trusted-ca-bundle\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257426 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257447 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkdbb\" (UniqueName: \"kubernetes.io/projected/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-kube-api-access-zkdbb\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.257461 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-service-ca\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.258436 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-service-ca\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.258469 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.259067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-oauth-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.259134 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-trusted-ca-bundle\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.263372 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-oauth-config\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.263376 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-console-serving-cert\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.265050 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bk9bk" event={"ID":"7c2c98c8-3835-4a16-b57b-6a67e455b7e3","Type":"ContainerStarted","Data":"153ca29f2edbabfc6f5f8ec6a63ab59a583c9402eab8b8d9cca66997f5cbeaf1"} Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.265879 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" event={"ID":"1aaa9676-48ce-4f8e-af9d-f683a8432515","Type":"ContainerStarted","Data":"f67b05c602b6a94c71f1e7145291d1e2965917ff88b89598b16203db1b64bcb6"} Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.273071 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkdbb\" (UniqueName: \"kubernetes.io/projected/a144a7dc-a361-422c-8ee4-e4ae6b9a6e03-kube-api-access-zkdbb\") pod \"console-5c5d6cd8f5-kfv52\" (UID: \"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03\") " pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.287399 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.392733 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5c5d6cd8f5-kfv52" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.483414 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn"] Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.575630 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5c5d6cd8f5-kfv52"] Nov 28 16:24:56 crc kubenswrapper[4909]: W1128 16:24:56.579312 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda144a7dc_a361_422c_8ee4_e4ae6b9a6e03.slice/crio-dc5132eec382ccb4e0fd024766608ca70858cbebe09b50dbdd1e0fc4751ef201 WatchSource:0}: Error finding container dc5132eec382ccb4e0fd024766608ca70858cbebe09b50dbdd1e0fc4751ef201: Status 404 returned error can't find the container with id dc5132eec382ccb4e0fd024766608ca70858cbebe09b50dbdd1e0fc4751ef201 Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.661552 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"] Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.669161 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.677683 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"] Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.709224 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv"] Nov 28 16:24:56 crc kubenswrapper[4909]: W1128 16:24:56.715538 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfaa4c0e_b2c7_4768_b261_87a0ef5696d6.slice/crio-2c30406a76f6c04329aad26cf818fc7092f879cd7e7a3a64260f07c3d6276056 WatchSource:0}: Error finding container 2c30406a76f6c04329aad26cf818fc7092f879cd7e7a3a64260f07c3d6276056: Status 404 returned error can't find the container with id 2c30406a76f6c04329aad26cf818fc7092f879cd7e7a3a64260f07c3d6276056 Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.869158 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.869439 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn72l\" (UniqueName: \"kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.869554 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.970418 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.970484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn72l\" (UniqueName: \"kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.970507 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.971428 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.971480 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:56 crc kubenswrapper[4909]: I1128 16:24:56.993430 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn72l\" (UniqueName: \"kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l\") pod \"redhat-marketplace-xnlpt\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") " pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.003795 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnlpt" Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.274085 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5c5d6cd8f5-kfv52" event={"ID":"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03","Type":"ContainerStarted","Data":"963467f3bcf099f302aa2b25b8c91089d18bd3238edda01067787391fdc5b399"} Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.274711 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5c5d6cd8f5-kfv52" event={"ID":"a144a7dc-a361-422c-8ee4-e4ae6b9a6e03","Type":"ContainerStarted","Data":"dc5132eec382ccb4e0fd024766608ca70858cbebe09b50dbdd1e0fc4751ef201"} Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.275803 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" event={"ID":"622cee50-946d-447b-b403-dba09b89346c","Type":"ContainerStarted","Data":"aa2b6247e00a8eb36ee10481c98058a0b11bcfff7aab1e2aee4874aba0d22309"} Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.277405 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" event={"ID":"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6","Type":"ContainerStarted","Data":"2c30406a76f6c04329aad26cf818fc7092f879cd7e7a3a64260f07c3d6276056"} Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.294394 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5c5d6cd8f5-kfv52" podStartSLOduration=2.294368748 podStartE2EDuration="2.294368748s" podCreationTimestamp="2025-11-28 16:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:24:57.289643192 +0000 UTC m=+879.686327726" watchObservedRunningTime="2025-11-28 16:24:57.294368748 +0000 UTC m=+879.691053272" Nov 28 16:24:57 crc kubenswrapper[4909]: I1128 16:24:57.421312 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"] Nov 28 16:24:58 crc kubenswrapper[4909]: I1128 16:24:58.287061 4909 generic.go:334] "Generic (PLEG): container finished" podID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerID="186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb" exitCode=0 Nov 28 16:24:58 crc kubenswrapper[4909]: I1128 16:24:58.287123 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerDied","Data":"186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb"} Nov 28 16:24:58 crc kubenswrapper[4909]: I1128 16:24:58.287189 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerStarted","Data":"ebd7dbc149c0d9c1dee934c2c201325fa3422a99c16927d924ac9bd2ae7c7d49"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.305128 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" event={"ID":"1aaa9676-48ce-4f8e-af9d-f683a8432515","Type":"ContainerStarted","Data":"b07881ffb95d53c64fe493af5a8355aaff83d70e4bb838d78abbe3afa398535c"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.308695 4909 generic.go:334] "Generic (PLEG): container finished" podID="6bde280e-92a4-4bb8-852e-10a09ab6051e" 
containerID="a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800" exitCode=0 Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.308836 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerDied","Data":"a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.311097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" event={"ID":"bfaa4c0e-b2c7-4768-b261-87a0ef5696d6","Type":"ContainerStarted","Data":"8bf3346355a3e54329505d3542c655c7e44e40a13aa19435873833da087ff3ec"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.311335 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.314575 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bk9bk" event={"ID":"7c2c98c8-3835-4a16-b57b-6a67e455b7e3","Type":"ContainerStarted","Data":"f8c19418084212254fd4b6d91acbcd7bb335e309378128028c34e6b2e2754fc3"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.314712 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-bk9bk" Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.316497 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" event={"ID":"622cee50-946d-447b-b403-dba09b89346c","Type":"ContainerStarted","Data":"140eccf1b83506dee599fa1fa3ac41e72f76836c5f53fd951149e3bd0de33703"} Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.365562 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-pfpfn" podStartSLOduration=2.482918428 podStartE2EDuration="5.365546766s" podCreationTimestamp="2025-11-28 16:24:55 +0000 UTC" firstStartedPulling="2025-11-28 16:24:56.498254901 +0000 UTC m=+878.894939425" lastFinishedPulling="2025-11-28 16:24:59.380883239 +0000 UTC m=+881.777567763" observedRunningTime="2025-11-28 16:25:00.355758865 +0000 UTC m=+882.752443409" watchObservedRunningTime="2025-11-28 16:25:00.365546766 +0000 UTC m=+882.762231290" Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.383055 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-bk9bk" podStartSLOduration=2.077119023 podStartE2EDuration="5.383036543s" podCreationTimestamp="2025-11-28 16:24:55 +0000 UTC" firstStartedPulling="2025-11-28 16:24:56.075417631 +0000 UTC m=+878.472102145" lastFinishedPulling="2025-11-28 16:24:59.381335141 +0000 UTC m=+881.778019665" observedRunningTime="2025-11-28 16:25:00.380857054 +0000 UTC m=+882.777541608" watchObservedRunningTime="2025-11-28 16:25:00.383036543 +0000 UTC m=+882.779721067" Nov 28 16:25:00 crc kubenswrapper[4909]: I1128 16:25:00.401354 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv" podStartSLOduration=2.725166751 podStartE2EDuration="5.401333131s" podCreationTimestamp="2025-11-28 16:24:55 +0000 UTC" firstStartedPulling="2025-11-28 16:24:56.717619953 +0000 UTC m=+879.114304477" lastFinishedPulling="2025-11-28 16:24:59.393786333 +0000 UTC m=+881.790470857" observedRunningTime="2025-11-28 16:25:00.396076111 +0000 UTC 
m=+882.792760645" watchObservedRunningTime="2025-11-28 16:25:00.401333131 +0000 UTC m=+882.798017655"
Nov 28 16:25:01 crc kubenswrapper[4909]: I1128 16:25:01.328290 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerStarted","Data":"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"}
Nov 28 16:25:01 crc kubenswrapper[4909]: I1128 16:25:01.348826 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xnlpt" podStartSLOduration=3.175622343 podStartE2EDuration="5.348808416s" podCreationTimestamp="2025-11-28 16:24:56 +0000 UTC" firstStartedPulling="2025-11-28 16:24:58.559906298 +0000 UTC m=+880.956590822" lastFinishedPulling="2025-11-28 16:25:00.733092361 +0000 UTC m=+883.129776895" observedRunningTime="2025-11-28 16:25:01.34631996 +0000 UTC m=+883.743004484" watchObservedRunningTime="2025-11-28 16:25:01.348808416 +0000 UTC m=+883.745492940"
Nov 28 16:25:03 crc kubenswrapper[4909]: I1128 16:25:03.342745 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" event={"ID":"1aaa9676-48ce-4f8e-af9d-f683a8432515","Type":"ContainerStarted","Data":"423e2114e79dd84a76148507824eea4f334fbc19a08fa7fc890353be379af56f"}
Nov 28 16:25:03 crc kubenswrapper[4909]: I1128 16:25:03.385987 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-t2dgs" podStartSLOduration=2.193376504 podStartE2EDuration="8.385959189s" podCreationTimestamp="2025-11-28 16:24:55 +0000 UTC" firstStartedPulling="2025-11-28 16:24:56.229537312 +0000 UTC m=+878.626221836" lastFinishedPulling="2025-11-28 16:25:02.422119997 +0000 UTC m=+884.818804521" observedRunningTime="2025-11-28 16:25:03.371089653 +0000 UTC m=+885.767774187" watchObservedRunningTime="2025-11-28 16:25:03.385959189 +0000 UTC m=+885.782643743"
Nov 28 16:25:06 crc kubenswrapper[4909]: I1128 16:25:06.030122 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-bk9bk"
Nov 28 16:25:06 crc kubenswrapper[4909]: I1128 16:25:06.393560 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5c5d6cd8f5-kfv52"
Nov 28 16:25:06 crc kubenswrapper[4909]: I1128 16:25:06.400221 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5c5d6cd8f5-kfv52"
Nov 28 16:25:06 crc kubenswrapper[4909]: I1128 16:25:06.404280 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5c5d6cd8f5-kfv52"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.004992 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.005806 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.256744 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.375991 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5c5d6cd8f5-kfv52"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.445859 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.451062 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"]
Nov 28 16:25:07 crc kubenswrapper[4909]: I1128 16:25:07.493478 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"]
Nov 28 16:25:09 crc kubenswrapper[4909]: I1128 16:25:09.381917 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xnlpt" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="registry-server" containerID="cri-o://70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166" gracePeriod=2
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.322381 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.403447 4909 generic.go:334] "Generic (PLEG): container finished" podID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerID="70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166" exitCode=0
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.403495 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerDied","Data":"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"}
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.403531 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnlpt" event={"ID":"6bde280e-92a4-4bb8-852e-10a09ab6051e","Type":"ContainerDied","Data":"ebd7dbc149c0d9c1dee934c2c201325fa3422a99c16927d924ac9bd2ae7c7d49"}
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.403553 4909 scope.go:117] "RemoveContainer" containerID="70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.403720 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnlpt"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.427576 4909 scope.go:117] "RemoveContainer" containerID="a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.446141 4909 scope.go:117] "RemoveContainer" containerID="186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.458496 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn72l\" (UniqueName: \"kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l\") pod \"6bde280e-92a4-4bb8-852e-10a09ab6051e\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") "
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.458583 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities\") pod \"6bde280e-92a4-4bb8-852e-10a09ab6051e\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") "
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.458723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content\") pod \"6bde280e-92a4-4bb8-852e-10a09ab6051e\" (UID: \"6bde280e-92a4-4bb8-852e-10a09ab6051e\") "
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.460208 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities" (OuterVolumeSpecName: "utilities") pod "6bde280e-92a4-4bb8-852e-10a09ab6051e" (UID: "6bde280e-92a4-4bb8-852e-10a09ab6051e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.463412 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l" (OuterVolumeSpecName: "kube-api-access-wn72l") pod "6bde280e-92a4-4bb8-852e-10a09ab6051e" (UID: "6bde280e-92a4-4bb8-852e-10a09ab6051e"). InnerVolumeSpecName "kube-api-access-wn72l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.493331 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bde280e-92a4-4bb8-852e-10a09ab6051e" (UID: "6bde280e-92a4-4bb8-852e-10a09ab6051e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.499808 4909 scope.go:117] "RemoveContainer" containerID="70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"
Nov 28 16:25:10 crc kubenswrapper[4909]: E1128 16:25:10.500416 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166\": container with ID starting with 70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166 not found: ID does not exist" containerID="70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.500506 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166"} err="failed to get container status \"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166\": rpc error: code = NotFound desc = could not find container \"70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166\": container with ID starting with 70993d07e42df05004a918914be1ea1118bded0bec261c0d1583ac2b13770166 not found: ID does not exist"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.500587 4909 scope.go:117] "RemoveContainer" containerID="a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800"
Nov 28 16:25:10 crc kubenswrapper[4909]: E1128 16:25:10.501170 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800\": container with ID starting with a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800 not found: ID does not exist" containerID="a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.501240 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800"} err="failed to get container status \"a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800\": rpc error: code = NotFound desc = could not find container \"a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800\": container with ID starting with a8a760465809ed2c4307440cbf985d8d5230353d323254e0aeb5353c485fb800 not found: ID does not exist"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.501273 4909 scope.go:117] "RemoveContainer" containerID="186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb"
Nov 28 16:25:10 crc kubenswrapper[4909]: E1128 16:25:10.501610 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb\": container with ID starting with 186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb not found: ID does not exist" containerID="186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.501715 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb"} err="failed to get container status \"186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb\": rpc error: code = NotFound desc = could not find container \"186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb\": container with ID starting with 186df1aa9457ae6aed45f7b95bc618852ff8df07ff4292c0b164804ab691e4fb not found: ID does not exist"
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.560167 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.560431 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn72l\" (UniqueName: \"kubernetes.io/projected/6bde280e-92a4-4bb8-852e-10a09ab6051e-kube-api-access-wn72l\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.560512 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bde280e-92a4-4bb8-852e-10a09ab6051e-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.751296 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"]
Nov 28 16:25:10 crc kubenswrapper[4909]: I1128 16:25:10.756623 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnlpt"]
Nov 28 16:25:11 crc kubenswrapper[4909]: I1128 16:25:11.914915 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" path="/var/lib/kubelet/pods/6bde280e-92a4-4bb8-852e-10a09ab6051e/volumes"
Nov 28 16:25:16 crc kubenswrapper[4909]: I1128 16:25:16.297244 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-9m5xv"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.631528 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:19 crc kubenswrapper[4909]: E1128 16:25:19.631803 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="extract-content"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.631819 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="extract-content"
Nov 28 16:25:19 crc kubenswrapper[4909]: E1128 16:25:19.631834 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="registry-server"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.631842 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="registry-server"
Nov 28 16:25:19 crc kubenswrapper[4909]: E1128 16:25:19.631873 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="extract-utilities"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.631883 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="extract-utilities"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.632018 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bde280e-92a4-4bb8-852e-10a09ab6051e" containerName="registry-server"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.632977 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.641862 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.679201 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.679260 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hz6z\" (UniqueName: \"kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.679383 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.780347 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hz6z\" (UniqueName: \"kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.780436 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.780500 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.781038 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.781083 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.806769 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hz6z\" (UniqueName: \"kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z\") pod \"community-operators-scs9g\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") " pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:19 crc kubenswrapper[4909]: I1128 16:25:19.958309 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:20 crc kubenswrapper[4909]: I1128 16:25:20.339459 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:20 crc kubenswrapper[4909]: I1128 16:25:20.465474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerStarted","Data":"69f64422ce9cca038ab81d195d98fd31f3f5db3920a7b057300180a9b5dce5b8"}
Nov 28 16:25:21 crc kubenswrapper[4909]: I1128 16:25:21.472045 4909 generic.go:334] "Generic (PLEG): container finished" podID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerID="d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f" exitCode=0
Nov 28 16:25:21 crc kubenswrapper[4909]: I1128 16:25:21.472153 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerDied","Data":"d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f"}
Nov 28 16:25:23 crc kubenswrapper[4909]: I1128 16:25:23.491826 4909 generic.go:334] "Generic (PLEG): container finished" podID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerID="4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba" exitCode=0
Nov 28 16:25:23 crc kubenswrapper[4909]: I1128 16:25:23.492286 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerDied","Data":"4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba"}
Nov 28 16:25:24 crc kubenswrapper[4909]: I1128 16:25:24.507529 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerStarted","Data":"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"}
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.136299 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-scs9g" podStartSLOduration=7.484752823 podStartE2EDuration="10.136277316s" podCreationTimestamp="2025-11-28 16:25:19 +0000 UTC" firstStartedPulling="2025-11-28 16:25:21.473730088 +0000 UTC m=+903.870414632" lastFinishedPulling="2025-11-28 16:25:24.125254601 +0000 UTC m=+906.521939125" observedRunningTime="2025-11-28 16:25:24.532844684 +0000 UTC m=+906.929529268" watchObservedRunningTime="2025-11-28 16:25:29.136277316 +0000 UTC m=+911.532961840"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.138341 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"]
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.139594 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.141516 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.182075 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"]
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.337777 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvl6z\" (UniqueName: \"kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.337841 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.337894 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.439562 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.439691 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvl6z\" (UniqueName: \"kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.439743 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.440321 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.440353 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.463038 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvl6z\" (UniqueName: \"kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.758906 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.958740 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:29 crc kubenswrapper[4909]: I1128 16:25:29.959194 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:30 crc kubenswrapper[4909]: I1128 16:25:30.004034 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:30 crc kubenswrapper[4909]: I1128 16:25:30.230696 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"]
Nov 28 16:25:30 crc kubenswrapper[4909]: I1128 16:25:30.545973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2" event={"ID":"def73bb6-09ff-4074-8213-4962def55a10","Type":"ContainerStarted","Data":"6b9b751b7ea8b51d31a168c390596977136c20cbbd932522ba7b87963e615338"}
Nov 28 16:25:30 crc kubenswrapper[4909]: I1128 16:25:30.605599 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.507972 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-qjf9t" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerName="console" containerID="cri-o://439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a" gracePeriod=15
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.559730 4909 generic.go:334] "Generic (PLEG): container finished" podID="def73bb6-09ff-4074-8213-4962def55a10" containerID="887f35738fe0d92ee46fa2b1deb2f55bb98bfdca35307381e2cc0dbadbc7323b" exitCode=0
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.559773 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2" event={"ID":"def73bb6-09ff-4074-8213-4962def55a10","Type":"ContainerDied","Data":"887f35738fe0d92ee46fa2b1deb2f55bb98bfdca35307381e2cc0dbadbc7323b"}
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.868896 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qjf9t_06224dc2-1e32-47b8-8a11-0c90a61084cf/console/0.log"
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.868953 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qjf9t"
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.879296 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.879486 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-scs9g" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="registry-server" containerID="cri-o://78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca" gracePeriod=2
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.884898 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885022 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885168 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfzz6\" (UniqueName: \"kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885193 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885320 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885395 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885424 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca\") pod \"06224dc2-1e32-47b8-8a11-0c90a61084cf\" (UID: \"06224dc2-1e32-47b8-8a11-0c90a61084cf\") "
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885612 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config" (OuterVolumeSpecName: "console-config") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.885876 4909 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.886714 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.886969 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca" (OuterVolumeSpecName: "service-ca") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.887724 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.896312 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.900175 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.900372 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6" (OuterVolumeSpecName: "kube-api-access-xfzz6") pod "06224dc2-1e32-47b8-8a11-0c90a61084cf" (UID: "06224dc2-1e32-47b8-8a11-0c90a61084cf"). InnerVolumeSpecName "kube-api-access-xfzz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986635 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfzz6\" (UniqueName: \"kubernetes.io/projected/06224dc2-1e32-47b8-8a11-0c90a61084cf-kube-api-access-xfzz6\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986686 4909 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986696 4909 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986705 4909 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986714 4909 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06224dc2-1e32-47b8-8a11-0c90a61084cf-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:32 crc kubenswrapper[4909]: I1128 16:25:32.986722 4909 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06224dc2-1e32-47b8-8a11-0c90a61084cf-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.178050 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.187874 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities\") pod \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") "
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.187935 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content\") pod \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") "
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.187956 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hz6z\" (UniqueName: \"kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z\") pod \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\" (UID: \"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77\") "
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.189559 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities" (OuterVolumeSpecName: "utilities") pod "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" (UID: "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.190856 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z" (OuterVolumeSpecName: "kube-api-access-7hz6z") pod "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" (UID: "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77"). InnerVolumeSpecName "kube-api-access-7hz6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.242845 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" (UID: "3c2dc49e-c28d-4f79-8b5b-89d1edefdd77"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.288851 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.288887 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hz6z\" (UniqueName: \"kubernetes.io/projected/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-kube-api-access-7hz6z\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.288898 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.568925 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qjf9t_06224dc2-1e32-47b8-8a11-0c90a61084cf/console/0.log"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.568971 4909 generic.go:334] "Generic (PLEG): container finished" podID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerID="439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a" exitCode=2
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.569026 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qjf9t" event={"ID":"06224dc2-1e32-47b8-8a11-0c90a61084cf","Type":"ContainerDied","Data":"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"}
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.569055 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qjf9t" event={"ID":"06224dc2-1e32-47b8-8a11-0c90a61084cf","Type":"ContainerDied","Data":"a8b7b0a2f7e774e4668fd5c20c312857d0ba0aa96585f98894ee6a9a1df46cf5"}
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.569068 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qjf9t"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.569071 4909 scope.go:117] "RemoveContainer" containerID="439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.572545 4909 generic.go:334] "Generic (PLEG): container finished" podID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerID="78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca" exitCode=0
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.572605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerDied","Data":"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"}
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.572646 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-scs9g" event={"ID":"3c2dc49e-c28d-4f79-8b5b-89d1edefdd77","Type":"ContainerDied","Data":"69f64422ce9cca038ab81d195d98fd31f3f5db3920a7b057300180a9b5dce5b8"}
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.572862 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-scs9g"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.608450 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"]
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.609528 4909 scope.go:117] "RemoveContainer" containerID="439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"
Nov 28 16:25:33 crc kubenswrapper[4909]: E1128 16:25:33.610074 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a\": container with ID starting with 439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a not found: ID does not exist" containerID="439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.610117 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a"} err="failed to get container status \"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a\": rpc error: code = NotFound desc = could not find container \"439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a\": container with ID starting with 439076ca80c295f5ff253f45c069a0d9576c9cd88d3fe150dcba26460c96045a not found: ID does not exist"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.610142 4909 scope.go:117] "RemoveContainer" containerID="78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.617128 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-qjf9t"]
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.625817 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.630143 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-scs9g"]
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.652393 4909 scope.go:117] "RemoveContainer" containerID="4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.672126 4909 scope.go:117] "RemoveContainer" containerID="d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.697437 4909 scope.go:117] "RemoveContainer" containerID="78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"
Nov 28 16:25:33 crc kubenswrapper[4909]: E1128 16:25:33.697870 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca\": container with ID starting with 78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca not found: ID does not exist" containerID="78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.697908 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca"} err="failed to get container status \"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca\": rpc error: code = NotFound desc = could not find container \"78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca\": container with ID starting with 78efede959b70bfa9b43acfc7a2643a2e6d543d66ad7d09b65fed8b5177dc7ca not found: ID does not exist"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.697934 4909 scope.go:117] "RemoveContainer" containerID="4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba"
Nov 28 16:25:33 crc kubenswrapper[4909]: E1128 16:25:33.698153 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba\": container with ID starting with 4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba not found: ID does not exist" containerID="4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.698181 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba"} err="failed to get container status \"4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba\": rpc error: code = NotFound desc = could not find container \"4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba\": container with ID starting with 4b943a7fd6196f3413b45b5e1ef237be1d810e20de5868092f12c90fc80a3bba not found: ID does not exist"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.698198 4909 scope.go:117] "RemoveContainer" containerID="d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f"
Nov 28 16:25:33 crc kubenswrapper[4909]: E1128 16:25:33.699050 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f\": container with ID starting with d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f not found: ID does not exist" containerID="d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.699098 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f"} err="failed to get container status \"d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f\": rpc error: code = NotFound desc = could not find container \"d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f\": container with ID starting with d0f997e33da80f48b7f7bda05349c49cf9e058c4c05041bd75f112d5624c305f not found: ID does not exist"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.909984 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" path="/var/lib/kubelet/pods/06224dc2-1e32-47b8-8a11-0c90a61084cf/volumes"
Nov 28 16:25:33 crc kubenswrapper[4909]: I1128 16:25:33.910754 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" path="/var/lib/kubelet/pods/3c2dc49e-c28d-4f79-8b5b-89d1edefdd77/volumes"
Nov 28 16:25:34 crc kubenswrapper[4909]: I1128 16:25:34.581309 4909 generic.go:334] "Generic (PLEG): container finished" podID="def73bb6-09ff-4074-8213-4962def55a10" containerID="7aa2a0b3065a6d48baf0c4f434020353d1254e7f1a7fd8c5b112eb11a4e98467" exitCode=0
Nov 28 16:25:34 crc kubenswrapper[4909]: I1128 16:25:34.581358 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2" event={"ID":"def73bb6-09ff-4074-8213-4962def55a10","Type":"ContainerDied","Data":"7aa2a0b3065a6d48baf0c4f434020353d1254e7f1a7fd8c5b112eb11a4e98467"}
Nov 28 16:25:35 crc kubenswrapper[4909]: I1128 16:25:35.592593 4909 generic.go:334] "Generic (PLEG): container finished" podID="def73bb6-09ff-4074-8213-4962def55a10" containerID="7cd36b62dd655d73b22224546c9897256bf1556971f66dca325e052046284e89" exitCode=0
Nov 28 16:25:35 crc kubenswrapper[4909]: I1128 16:25:35.592706 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2" event={"ID":"def73bb6-09ff-4074-8213-4962def55a10","Type":"ContainerDied","Data":"7cd36b62dd655d73b22224546c9897256bf1556971f66dca325e052046284e89"}
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.821986 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.845439 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util\") pod \"def73bb6-09ff-4074-8213-4962def55a10\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") "
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.845523 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvl6z\" (UniqueName: \"kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z\") pod \"def73bb6-09ff-4074-8213-4962def55a10\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") "
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.845619 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle\") pod \"def73bb6-09ff-4074-8213-4962def55a10\" (UID: \"def73bb6-09ff-4074-8213-4962def55a10\") "
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.847256 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle" (OuterVolumeSpecName: "bundle") pod "def73bb6-09ff-4074-8213-4962def55a10" (UID: "def73bb6-09ff-4074-8213-4962def55a10"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.858234 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z" (OuterVolumeSpecName: "kube-api-access-gvl6z") pod "def73bb6-09ff-4074-8213-4962def55a10" (UID: "def73bb6-09ff-4074-8213-4962def55a10"). InnerVolumeSpecName "kube-api-access-gvl6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.881317 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util" (OuterVolumeSpecName: "util") pod "def73bb6-09ff-4074-8213-4962def55a10" (UID: "def73bb6-09ff-4074-8213-4962def55a10"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.946872 4909 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-util\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.946930 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvl6z\" (UniqueName: \"kubernetes.io/projected/def73bb6-09ff-4074-8213-4962def55a10-kube-api-access-gvl6z\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:36 crc kubenswrapper[4909]: I1128 16:25:36.946943 4909 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/def73bb6-09ff-4074-8213-4962def55a10-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:25:37 crc kubenswrapper[4909]: I1128 16:25:37.608182 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2" event={"ID":"def73bb6-09ff-4074-8213-4962def55a10","Type":"ContainerDied","Data":"6b9b751b7ea8b51d31a168c390596977136c20cbbd932522ba7b87963e615338"}
Nov 28 16:25:37 crc kubenswrapper[4909]: I1128 16:25:37.608569 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b9b751b7ea8b51d31a168c390596977136c20cbbd932522ba7b87963e615338"
Nov 28 16:25:37 crc kubenswrapper[4909]: I1128 16:25:37.608271 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.074645 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"]
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075400 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="util"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075416 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="util"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075432 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="extract-utilities"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075439 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="extract-utilities"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075450 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="extract-content"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075457 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="extract-content"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075465 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerName="console"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075470 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerName="console"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075478 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="registry-server"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075485 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="registry-server"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075493 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="pull"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075500 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="pull"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.075512 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="extract"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075517 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="extract"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075608 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="def73bb6-09ff-4074-8213-4962def55a10" containerName="extract"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075619 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="06224dc2-1e32-47b8-8a11-0c90a61084cf" containerName="console"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.075625 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2dc49e-c28d-4f79-8b5b-89d1edefdd77" containerName="registry-server"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.082707 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: W1128 16:25:47.094291 4909 reflector.go:561] object-"metallb-system"/"manager-account-dockercfg-nh884": failed to list *v1.Secret: secrets "manager-account-dockercfg-nh884" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object
Nov 28 16:25:47 crc kubenswrapper[4909]: W1128 16:25:47.094312 4909 reflector.go:561] object-"metallb-system"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.094342 4909 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"manager-account-dockercfg-nh884\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"manager-account-dockercfg-nh884\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.094367 4909 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:25:47 crc kubenswrapper[4909]: W1128 16:25:47.094412 4909 reflector.go:561] object-"metallb-system"/"metallb-operator-webhook-server-cert": failed to list *v1.Secret: secrets "metallb-operator-webhook-server-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.094431 4909 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"metallb-operator-webhook-server-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"metallb-operator-webhook-server-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:25:47 crc kubenswrapper[4909]: W1128 16:25:47.094439 4909 reflector.go:561] object-"metallb-system"/"metallb-operator-controller-manager-service-cert": failed to list *v1.Secret: secrets "metallb-operator-controller-manager-service-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.094459 4909 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"metallb-operator-controller-manager-service-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"metallb-operator-controller-manager-service-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:25:47 crc kubenswrapper[4909]: W1128 16:25:47.094461 4909 reflector.go:561] object-"metallb-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object
Nov 28 16:25:47 crc kubenswrapper[4909]: E1128 16:25:47.094533 4909 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.098500 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"]
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.161695 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s472\" (UniqueName: \"kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.161842 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.161948 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.263173 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s472\" (UniqueName: \"kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.263265 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.263291 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.424153 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"]
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.425058 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.427124 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.427124 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.427286 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-r7pwg"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.444264 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"]
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.566831 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-webhook-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.567022 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86sg8\" (UniqueName: \"kubernetes.io/projected/98c039de-9587-4f9e-989f-f46662bfec99-kube-api-access-86sg8\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.567059 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-apiservice-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.667670 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-webhook-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.667774 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86sg8\" (UniqueName: \"kubernetes.io/projected/98c039de-9587-4f9e-989f-f46662bfec99-kube-api-access-86sg8\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.667802 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-apiservice-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"
Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.673818 4909
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-webhook-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.673986 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98c039de-9587-4f9e-989f-f46662bfec99-apiservice-cert\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:25:47 crc kubenswrapper[4909]: I1128 16:25:47.953576 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.165821 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.238693 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-nh884" Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.263969 4909 secret.go:188] Couldn't get secret metallb-system/metallb-operator-controller-manager-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.264033 4909 secret.go:188] Couldn't get secret metallb-system/metallb-operator-controller-manager-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.264072 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert podName:393afd2e-0377-4085-a367-7ad40c67b6a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:25:48.764050473 +0000 UTC m=+931.160734997 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert") pod "metallb-operator-controller-manager-7ff45df9c5-whssd" (UID: "393afd2e-0377-4085-a367-7ad40c67b6a5") : failed to sync secret cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.264091 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert podName:393afd2e-0377-4085-a367-7ad40c67b6a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:25:48.764083984 +0000 UTC m=+931.160768508 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert") pod "metallb-operator-controller-manager-7ff45df9c5-whssd" (UID: "393afd2e-0377-4085-a367-7ad40c67b6a5") : failed to sync secret cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.283098 4909 projected.go:288] Couldn't get configMap metallb-system/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.283142 4909 projected.go:194] Error preparing data for projected volume kube-api-access-2s472 for pod metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd: failed to sync configmap cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: E1128 16:25:48.283194 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472 podName:393afd2e-0377-4085-a367-7ad40c67b6a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:25:48.783178183 +0000 UTC m=+931.179862707 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-2s472" (UniqueName: "kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472") pod "metallb-operator-controller-manager-7ff45df9c5-whssd" (UID: "393afd2e-0377-4085-a367-7ad40c67b6a5") : failed to sync configmap cache: timed out waiting for the condition Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.283315 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.304135 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86sg8\" (UniqueName: \"kubernetes.io/projected/98c039de-9587-4f9e-989f-f46662bfec99-kube-api-access-86sg8\") pod \"metallb-operator-webhook-server-6d85d4dbb-jr45x\" (UID: \"98c039de-9587-4f9e-989f-f46662bfec99\") " pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.340164 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.561435 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x"] Nov 28 16:25:48 crc kubenswrapper[4909]: W1128 16:25:48.569732 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98c039de_9587_4f9e_989f_f46662bfec99.slice/crio-0a1e013fe67719010e2512fa06acdc1c52fa1ab1f0eeb7aa2a0d06ea055f67d4 WatchSource:0}: Error finding container 0a1e013fe67719010e2512fa06acdc1c52fa1ab1f0eeb7aa2a0d06ea055f67d4: Status 404 returned error can't find the container with id 0a1e013fe67719010e2512fa06acdc1c52fa1ab1f0eeb7aa2a0d06ea055f67d4 Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.613336 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.665009 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" event={"ID":"98c039de-9587-4f9e-989f-f46662bfec99","Type":"ContainerStarted","Data":"0a1e013fe67719010e2512fa06acdc1c52fa1ab1f0eeb7aa2a0d06ea055f67d4"} Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.783605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.783739 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.784625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s472\" (UniqueName: \"kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.787818 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-webhook-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.790332 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/393afd2e-0377-4085-a367-7ad40c67b6a5-apiservice-cert\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" 
Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.790985 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s472\" (UniqueName: \"kubernetes.io/projected/393afd2e-0377-4085-a367-7ad40c67b6a5-kube-api-access-2s472\") pod \"metallb-operator-controller-manager-7ff45df9c5-whssd\" (UID: \"393afd2e-0377-4085-a367-7ad40c67b6a5\") " pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:48 crc kubenswrapper[4909]: I1128 16:25:48.912548 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:49 crc kubenswrapper[4909]: I1128 16:25:49.087235 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd"] Nov 28 16:25:49 crc kubenswrapper[4909]: W1128 16:25:49.093570 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod393afd2e_0377_4085_a367_7ad40c67b6a5.slice/crio-fdb12dd03875ee074772684d21772c015919eeb695ec8054a3ae25dbaf21b0dd WatchSource:0}: Error finding container fdb12dd03875ee074772684d21772c015919eeb695ec8054a3ae25dbaf21b0dd: Status 404 returned error can't find the container with id fdb12dd03875ee074772684d21772c015919eeb695ec8054a3ae25dbaf21b0dd Nov 28 16:25:49 crc kubenswrapper[4909]: I1128 16:25:49.672738 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" event={"ID":"393afd2e-0377-4085-a367-7ad40c67b6a5","Type":"ContainerStarted","Data":"fdb12dd03875ee074772684d21772c015919eeb695ec8054a3ae25dbaf21b0dd"} Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.701189 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" event={"ID":"98c039de-9587-4f9e-989f-f46662bfec99","Type":"ContainerStarted","Data":"189a2d68a46557f68d62dab06c6d8107e816b93d36283d6dd35df39981b044ae"} Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.701812 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.702769 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" event={"ID":"393afd2e-0377-4085-a367-7ad40c67b6a5","Type":"ContainerStarted","Data":"f49cbca110967df245cb68f99c1f6eff5da4b5cb8ade0fadc44e8ec591eed0d0"} Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.702948 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.781834 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" podStartSLOduration=2.798381124 podStartE2EDuration="7.781813944s" podCreationTimestamp="2025-11-28 16:25:47 +0000 UTC" firstStartedPulling="2025-11-28 16:25:48.581921803 +0000 UTC m=+930.978606337" lastFinishedPulling="2025-11-28 16:25:53.565354633 +0000 UTC m=+935.962039157" observedRunningTime="2025-11-28 16:25:54.740338087 +0000 UTC m=+937.137022611" watchObservedRunningTime="2025-11-28 16:25:54.781813944 +0000 UTC m=+937.178498468" Nov 28 16:25:54 crc kubenswrapper[4909]: I1128 16:25:54.785924 4909 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" podStartSLOduration=3.33731681 podStartE2EDuration="7.785904703s" podCreationTimestamp="2025-11-28 16:25:47 +0000 UTC" firstStartedPulling="2025-11-28 16:25:49.098844932 +0000 UTC m=+931.495529456" lastFinishedPulling="2025-11-28 16:25:53.547432825 +0000 UTC m=+935.944117349" observedRunningTime="2025-11-28 16:25:54.773594404 +0000 UTC m=+937.170278928" watchObservedRunningTime="2025-11-28 16:25:54.785904703 +0000 UTC m=+937.182589227" Nov 28 16:26:08 crc kubenswrapper[4909]: I1128 16:26:08.346733 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6d85d4dbb-jr45x" Nov 28 16:26:28 crc kubenswrapper[4909]: I1128 16:26:28.915364 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7ff45df9c5-whssd" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.603580 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.605040 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.609884 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.610171 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-2n22l" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.613538 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-kh8w8"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.623868 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.625895 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.626630 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.628794 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.681354 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-x27f8"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.682602 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.687678 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.688087 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.689488 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-86jkb" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.695961 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696480 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-reloader\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696532 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics-certs\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696599 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696637 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/802e20e0-dcbc-4743-9e54-dad0045f1e64-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696781 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh85j\" (UniqueName: \"kubernetes.io/projected/802e20e0-dcbc-4743-9e54-dad0045f1e64-kube-api-access-qh85j\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696820 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hwgm\" (UniqueName: \"kubernetes.io/projected/72616f0a-8071-4cc3-a5bb-0c226dcbf877-kube-api-access-8hwgm\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696850 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-startup\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696884 
4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-sockets\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.696915 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-conf\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.700258 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-vcxs6"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.701103 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.706875 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.710927 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-vcxs6"] Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.798280 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh85j\" (UniqueName: \"kubernetes.io/projected/802e20e0-dcbc-4743-9e54-dad0045f1e64-kube-api-access-qh85j\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.798559 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hwgm\" (UniqueName: \"kubernetes.io/projected/72616f0a-8071-4cc3-a5bb-0c226dcbf877-kube-api-access-8hwgm\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.798998 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metallb-excludel2\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799123 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-metrics-certs\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799221 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799321 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: 
\"kubernetes.io/configmap/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-startup\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799424 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8n6j\" (UniqueName: \"kubernetes.io/projected/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-kube-api-access-n8n6j\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799522 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-sockets\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799630 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-conf\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799752 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-cert\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799866 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-reloader\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.799963 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics-certs\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.800054 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.800151 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5cz6\" (UniqueName: \"kubernetes.io/projected/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-kube-api-access-v5cz6\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.800247 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/802e20e0-dcbc-4743-9e54-dad0045f1e64-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" 
Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.800351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metrics-certs\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.801381 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-reloader\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.801481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-sockets\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.801486 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-conf\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.801706 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.802125 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/72616f0a-8071-4cc3-a5bb-0c226dcbf877-frr-startup\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.808563 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72616f0a-8071-4cc3-a5bb-0c226dcbf877-metrics-certs\") pod \"frr-k8s-kh8w8\" (UID: \"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.809322 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/802e20e0-dcbc-4743-9e54-dad0045f1e64-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.818433 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh85j\" (UniqueName: \"kubernetes.io/projected/802e20e0-dcbc-4743-9e54-dad0045f1e64-kube-api-access-qh85j\") pod \"frr-k8s-webhook-server-7fcb986d4-6w4xm\" (UID: \"802e20e0-dcbc-4743-9e54-dad0045f1e64\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.819970 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hwgm\" (UniqueName: \"kubernetes.io/projected/72616f0a-8071-4cc3-a5bb-0c226dcbf877-kube-api-access-8hwgm\") pod \"frr-k8s-kh8w8\" (UID: 
\"72616f0a-8071-4cc3-a5bb-0c226dcbf877\") " pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902554 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5cz6\" (UniqueName: \"kubernetes.io/projected/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-kube-api-access-v5cz6\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902610 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metrics-certs\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902674 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metallb-excludel2\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902704 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902724 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-metrics-certs\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902751 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8n6j\" (UniqueName: \"kubernetes.io/projected/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-kube-api-access-n8n6j\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.902785 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-cert\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: E1128 16:26:29.903180 4909 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 16:26:29 crc kubenswrapper[4909]: E1128 16:26:29.903297 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist podName:7bfa82af-4dc2-4b43-a682-c6b4e9fedde7 nodeName:}" failed. No retries permitted until 2025-11-28 16:26:30.40328096 +0000 UTC m=+972.799965484 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist") pod "speaker-x27f8" (UID: "7bfa82af-4dc2-4b43-a682-c6b4e9fedde7") : secret "metallb-memberlist" not found Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.903633 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metallb-excludel2\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.905558 4909 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.906812 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-metrics-certs\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.907704 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-metrics-certs\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.916529 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-cert\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.926892 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5cz6\" (UniqueName: \"kubernetes.io/projected/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-kube-api-access-v5cz6\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.927374 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8n6j\" (UniqueName: \"kubernetes.io/projected/4f88a2f5-e41e-4832-94e4-1d5e190ccdd4-kube-api-access-n8n6j\") pod \"controller-f8648f98b-vcxs6\" (UID: \"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4\") " pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.935798 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:29 crc kubenswrapper[4909]: I1128 16:26:29.953466 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.020423 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.150769 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm"] Nov 28 16:26:30 crc kubenswrapper[4909]: W1128 16:26:30.153774 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod802e20e0_dcbc_4743_9e54_dad0045f1e64.slice/crio-cee9f41c2c69a5f9dd76ecfeb07dc89ee19d9bac9e4728fa04cf097629270fb2 WatchSource:0}: Error finding container cee9f41c2c69a5f9dd76ecfeb07dc89ee19d9bac9e4728fa04cf097629270fb2: Status 404 returned error can't find the container with id cee9f41c2c69a5f9dd76ecfeb07dc89ee19d9bac9e4728fa04cf097629270fb2 Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.216901 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-vcxs6"] Nov 28 16:26:30 crc kubenswrapper[4909]: W1128 16:26:30.222300 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f88a2f5_e41e_4832_94e4_1d5e190ccdd4.slice/crio-f3bed53ce60d17eede054759614dc2d7d2cc85ca275fcf5a3c59caf25229b06c WatchSource:0}: Error finding container f3bed53ce60d17eede054759614dc2d7d2cc85ca275fcf5a3c59caf25229b06c: Status 404 returned error can't find the container with id f3bed53ce60d17eede054759614dc2d7d2cc85ca275fcf5a3c59caf25229b06c Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.409627 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:30 crc kubenswrapper[4909]: E1128 16:26:30.409850 4909 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 16:26:30 crc kubenswrapper[4909]: E1128 16:26:30.409908 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist podName:7bfa82af-4dc2-4b43-a682-c6b4e9fedde7 nodeName:}" failed. No retries permitted until 2025-11-28 16:26:31.409894165 +0000 UTC m=+973.806578689 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist") pod "speaker-x27f8" (UID: "7bfa82af-4dc2-4b43-a682-c6b4e9fedde7") : secret "metallb-memberlist" not found Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.911107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" event={"ID":"802e20e0-dcbc-4743-9e54-dad0045f1e64","Type":"ContainerStarted","Data":"cee9f41c2c69a5f9dd76ecfeb07dc89ee19d9bac9e4728fa04cf097629270fb2"} Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.913170 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vcxs6" event={"ID":"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4","Type":"ContainerStarted","Data":"bbe4429166dd258510f1de0db9302cae9445ff45002c1e07efc357f4197c1556"} Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.913196 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vcxs6" event={"ID":"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4","Type":"ContainerStarted","Data":"1507b6d2cf4e7dee4de1400ac3841db4f29eb2a5607a45719187c8af8c67321e"} Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.913205 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vcxs6" event={"ID":"4f88a2f5-e41e-4832-94e4-1d5e190ccdd4","Type":"ContainerStarted","Data":"f3bed53ce60d17eede054759614dc2d7d2cc85ca275fcf5a3c59caf25229b06c"} Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.913267 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.914681 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"7a506bd7bf58a72284a1e83ac48296330c0b505a30a25ff0f7002bf62c518eae"} Nov 28 16:26:30 crc kubenswrapper[4909]: I1128 16:26:30.929087 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-vcxs6" podStartSLOduration=1.9290672309999999 podStartE2EDuration="1.929067231s" podCreationTimestamp="2025-11-28 16:26:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:26:30.927203972 +0000 UTC m=+973.323888496" watchObservedRunningTime="2025-11-28 16:26:30.929067231 +0000 UTC m=+973.325751755" Nov 28 16:26:31 crc kubenswrapper[4909]: I1128 16:26:31.421469 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:31 crc kubenswrapper[4909]: I1128 16:26:31.430047 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7bfa82af-4dc2-4b43-a682-c6b4e9fedde7-memberlist\") pod \"speaker-x27f8\" (UID: \"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7\") " pod="metallb-system/speaker-x27f8" Nov 28 16:26:31 crc kubenswrapper[4909]: I1128 16:26:31.510169 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-x27f8" Nov 28 16:26:31 crc kubenswrapper[4909]: W1128 16:26:31.538585 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bfa82af_4dc2_4b43_a682_c6b4e9fedde7.slice/crio-9157c90d3a94085a67de0fc31b173bdbed4a730ac119f0f39b68f0f8d8edf0d9 WatchSource:0}: Error finding container 9157c90d3a94085a67de0fc31b173bdbed4a730ac119f0f39b68f0f8d8edf0d9: Status 404 returned error can't find the container with id 9157c90d3a94085a67de0fc31b173bdbed4a730ac119f0f39b68f0f8d8edf0d9 Nov 28 16:26:31 crc kubenswrapper[4909]: I1128 16:26:31.926181 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x27f8" event={"ID":"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7","Type":"ContainerStarted","Data":"3e370c324eb0b611ab710e05f746d2a8cfcc876a3ddf9b21730050930b104208"} Nov 28 16:26:31 crc kubenswrapper[4909]: I1128 16:26:31.926225 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x27f8" event={"ID":"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7","Type":"ContainerStarted","Data":"9157c90d3a94085a67de0fc31b173bdbed4a730ac119f0f39b68f0f8d8edf0d9"} Nov 28 16:26:32 crc kubenswrapper[4909]: I1128 16:26:32.941839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x27f8" event={"ID":"7bfa82af-4dc2-4b43-a682-c6b4e9fedde7","Type":"ContainerStarted","Data":"8fdfce57c3412880078dded90bff9021e6885b9fe001160d71e012cfebee7477"} Nov 28 16:26:32 crc kubenswrapper[4909]: I1128 16:26:32.942978 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-x27f8" Nov 28 16:26:32 crc kubenswrapper[4909]: I1128 16:26:32.971872 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-x27f8" podStartSLOduration=3.9718504599999997 podStartE2EDuration="3.97185046s" podCreationTimestamp="2025-11-28 16:26:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:26:32.969798726 +0000 UTC m=+975.366483240" watchObservedRunningTime="2025-11-28 16:26:32.97185046 +0000 UTC m=+975.368534984" Nov 28 16:26:38 crc kubenswrapper[4909]: I1128 16:26:38.989422 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" event={"ID":"802e20e0-dcbc-4743-9e54-dad0045f1e64","Type":"ContainerStarted","Data":"77db3ae8920d63211f9fe1615e6a6bc6d323c4b94634a5b1d42d13ffa82f0654"} Nov 28 16:26:38 crc kubenswrapper[4909]: I1128 16:26:38.989988 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:38 crc kubenswrapper[4909]: I1128 16:26:38.991708 4909 generic.go:334] "Generic (PLEG): container finished" podID="72616f0a-8071-4cc3-a5bb-0c226dcbf877" containerID="52dcb8ac2ce115a71d72ca984403388b1a90906373d06ba80ab8cf8edba0155c" exitCode=0 Nov 28 16:26:38 crc kubenswrapper[4909]: I1128 16:26:38.991750 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerDied","Data":"52dcb8ac2ce115a71d72ca984403388b1a90906373d06ba80ab8cf8edba0155c"} Nov 28 16:26:39 crc kubenswrapper[4909]: I1128 16:26:39.012074 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" podStartSLOduration=1.984476104 
podStartE2EDuration="10.012059008s" podCreationTimestamp="2025-11-28 16:26:29 +0000 UTC" firstStartedPulling="2025-11-28 16:26:30.156142016 +0000 UTC m=+972.552826540" lastFinishedPulling="2025-11-28 16:26:38.18372492 +0000 UTC m=+980.580409444" observedRunningTime="2025-11-28 16:26:39.011190415 +0000 UTC m=+981.407874939" watchObservedRunningTime="2025-11-28 16:26:39.012059008 +0000 UTC m=+981.408743532" Nov 28 16:26:39 crc kubenswrapper[4909]: I1128 16:26:39.997964 4909 generic.go:334] "Generic (PLEG): container finished" podID="72616f0a-8071-4cc3-a5bb-0c226dcbf877" containerID="89eab7163129c3fa3809d4a174df1de230e3cbbd9885628b3fa6693dac9b2cd8" exitCode=0 Nov 28 16:26:39 crc kubenswrapper[4909]: I1128 16:26:39.997999 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerDied","Data":"89eab7163129c3fa3809d4a174df1de230e3cbbd9885628b3fa6693dac9b2cd8"} Nov 28 16:26:40 crc kubenswrapper[4909]: I1128 16:26:40.026178 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-vcxs6" Nov 28 16:26:41 crc kubenswrapper[4909]: I1128 16:26:41.007787 4909 generic.go:334] "Generic (PLEG): container finished" podID="72616f0a-8071-4cc3-a5bb-0c226dcbf877" containerID="01a628206ec5862ced03c86c7d45533de03913a9f049136218147aa2524a302f" exitCode=0 Nov 28 16:26:41 crc kubenswrapper[4909]: I1128 16:26:41.007840 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerDied","Data":"01a628206ec5862ced03c86c7d45533de03913a9f049136218147aa2524a302f"} Nov 28 16:26:41 crc kubenswrapper[4909]: I1128 16:26:41.515827 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-x27f8" Nov 28 16:26:42 crc kubenswrapper[4909]: I1128 16:26:42.017840 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"f7c9b0576d37b90b2b1c8fc081e84aa30bcecb29b85cd49ef1b82ddd0e9d8972"} Nov 28 16:26:42 crc kubenswrapper[4909]: I1128 16:26:42.017890 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"f5cc56b2b4ee381bc25d5c4fce05858a4540de590d9a6847ffbbe5a91bf6aa98"} Nov 28 16:26:42 crc kubenswrapper[4909]: I1128 16:26:42.017904 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"ddeb594fd40e54f70c75267ee815db4587ec435c8de623c540fcd40d5d083143"} Nov 28 16:26:42 crc kubenswrapper[4909]: I1128 16:26:42.017922 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"ec0b1afa3f73729f8b712f5481f17e66177d8971ae82fdc7622c27970b5647ff"} Nov 28 16:26:42 crc kubenswrapper[4909]: I1128 16:26:42.017937 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"01b52238674cc1e4da72e583046e98f86196246d1d8aa8883f4f088213eec6d5"} Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.028318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-kh8w8" event={"ID":"72616f0a-8071-4cc3-a5bb-0c226dcbf877","Type":"ContainerStarted","Data":"a6f05b496222f17801ef2f48fbb0137f3f89370347549617bed507f6fe50e611"} Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.028496 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.054348 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-kh8w8" podStartSLOduration=6.469612143 podStartE2EDuration="14.054332733s" podCreationTimestamp="2025-11-28 16:26:29 +0000 UTC" firstStartedPulling="2025-11-28 16:26:30.617057191 +0000 UTC m=+973.013741725" lastFinishedPulling="2025-11-28 16:26:38.201777791 +0000 UTC m=+980.598462315" observedRunningTime="2025-11-28 16:26:43.051142218 +0000 UTC m=+985.447826742" watchObservedRunningTime="2025-11-28 16:26:43.054332733 +0000 UTC m=+985.451017257" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.107042 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz"] Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.108117 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.109828 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.120568 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz"] Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.226010 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k2rq\" (UniqueName: \"kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.226059 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.226229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.327898 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k2rq\" (UniqueName: \"kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq\") pod 
\"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.327954 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.328001 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.328438 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.329172 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.350838 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k2rq\" (UniqueName: \"kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.423769 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:43 crc kubenswrapper[4909]: I1128 16:26:43.944720 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz"] Nov 28 16:26:44 crc kubenswrapper[4909]: I1128 16:26:44.034306 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" event={"ID":"4148c1c6-142b-41bb-9607-7d391b4cc45d","Type":"ContainerStarted","Data":"196b3b786712b995911f2b52f5749d9b3feb73a99293b8660a863cf635978fa5"} Nov 28 16:26:44 crc kubenswrapper[4909]: I1128 16:26:44.953982 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:44 crc kubenswrapper[4909]: I1128 16:26:44.993035 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:26:45 crc kubenswrapper[4909]: I1128 16:26:45.040273 4909 generic.go:334] "Generic (PLEG): container finished" podID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerID="3ae1e3d657bca3ed7d99a5a2cf6431f9d8a4fc16387ef3538288d73853c52f36" exitCode=0 Nov 28 16:26:45 crc kubenswrapper[4909]: I1128 16:26:45.040313 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" event={"ID":"4148c1c6-142b-41bb-9607-7d391b4cc45d","Type":"ContainerDied","Data":"3ae1e3d657bca3ed7d99a5a2cf6431f9d8a4fc16387ef3538288d73853c52f36"} Nov 28 16:26:49 crc kubenswrapper[4909]: I1128 16:26:49.911732 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:26:49 crc kubenswrapper[4909]: I1128 16:26:49.912080 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:26:49 crc kubenswrapper[4909]: I1128 16:26:49.944624 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-6w4xm" Nov 28 16:26:52 crc kubenswrapper[4909]: I1128 16:26:52.083846 4909 generic.go:334] "Generic (PLEG): container finished" podID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerID="9ed8b4fbb1c020b65a51b6c4b1af5ae23753d608f8bcdba4966283404f6a21d9" exitCode=0 Nov 28 16:26:52 crc kubenswrapper[4909]: I1128 16:26:52.083915 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" event={"ID":"4148c1c6-142b-41bb-9607-7d391b4cc45d","Type":"ContainerDied","Data":"9ed8b4fbb1c020b65a51b6c4b1af5ae23753d608f8bcdba4966283404f6a21d9"} Nov 28 16:26:53 crc kubenswrapper[4909]: I1128 16:26:53.092242 4909 generic.go:334] "Generic (PLEG): container finished" podID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerID="fb435ab58956d152f734377864788b18d3b05e567d2bc48b9759d9a06159fb61" exitCode=0 Nov 28 16:26:53 crc kubenswrapper[4909]: I1128 16:26:53.092279 4909 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" event={"ID":"4148c1c6-142b-41bb-9607-7d391b4cc45d","Type":"ContainerDied","Data":"fb435ab58956d152f734377864788b18d3b05e567d2bc48b9759d9a06159fb61"} Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.371984 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.487850 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle\") pod \"4148c1c6-142b-41bb-9607-7d391b4cc45d\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.487957 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util\") pod \"4148c1c6-142b-41bb-9607-7d391b4cc45d\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.488013 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k2rq\" (UniqueName: \"kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq\") pod \"4148c1c6-142b-41bb-9607-7d391b4cc45d\" (UID: \"4148c1c6-142b-41bb-9607-7d391b4cc45d\") " Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.494873 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle" (OuterVolumeSpecName: "bundle") pod "4148c1c6-142b-41bb-9607-7d391b4cc45d" (UID: "4148c1c6-142b-41bb-9607-7d391b4cc45d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.508255 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq" (OuterVolumeSpecName: "kube-api-access-5k2rq") pod "4148c1c6-142b-41bb-9607-7d391b4cc45d" (UID: "4148c1c6-142b-41bb-9607-7d391b4cc45d"). InnerVolumeSpecName "kube-api-access-5k2rq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.522960 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util" (OuterVolumeSpecName: "util") pod "4148c1c6-142b-41bb-9607-7d391b4cc45d" (UID: "4148c1c6-142b-41bb-9607-7d391b4cc45d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.589077 4909 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.589109 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k2rq\" (UniqueName: \"kubernetes.io/projected/4148c1c6-142b-41bb-9607-7d391b4cc45d-kube-api-access-5k2rq\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:54 crc kubenswrapper[4909]: I1128 16:26:54.589120 4909 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4148c1c6-142b-41bb-9607-7d391b4cc45d-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:55 crc kubenswrapper[4909]: I1128 16:26:55.106764 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" event={"ID":"4148c1c6-142b-41bb-9607-7d391b4cc45d","Type":"ContainerDied","Data":"196b3b786712b995911f2b52f5749d9b3feb73a99293b8660a863cf635978fa5"} Nov 28 16:26:55 crc kubenswrapper[4909]: I1128 16:26:55.106802 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="196b3b786712b995911f2b52f5749d9b3feb73a99293b8660a863cf635978fa5" Nov 28 16:26:55 crc kubenswrapper[4909]: I1128 16:26:55.106874 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz" Nov 28 16:26:59 crc kubenswrapper[4909]: I1128 16:26:59.958722 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-kh8w8" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982101 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp"] Nov 28 16:27:00 crc kubenswrapper[4909]: E1128 16:27:00.982325 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="util" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982337 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="util" Nov 28 16:27:00 crc kubenswrapper[4909]: E1128 16:27:00.982344 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="extract" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982351 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="extract" Nov 28 16:27:00 crc kubenswrapper[4909]: E1128 16:27:00.982367 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="pull" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982372 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="pull" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982484 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4148c1c6-142b-41bb-9607-7d391b4cc45d" containerName="extract" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.982919 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.984712 4909 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-g4vjs" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.984803 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.985028 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 28 16:27:00 crc kubenswrapper[4909]: I1128 16:27:00.997236 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp"] Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.073414 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6crjt\" (UniqueName: \"kubernetes.io/projected/12384261-5c7e-44f6-a5d7-5b42a8f55db6-kube-api-access-6crjt\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.073488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/12384261-5c7e-44f6-a5d7-5b42a8f55db6-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.174314 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6crjt\" (UniqueName: \"kubernetes.io/projected/12384261-5c7e-44f6-a5d7-5b42a8f55db6-kube-api-access-6crjt\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.174694 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/12384261-5c7e-44f6-a5d7-5b42a8f55db6-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.175117 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/12384261-5c7e-44f6-a5d7-5b42a8f55db6-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.194587 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6crjt\" (UniqueName: \"kubernetes.io/projected/12384261-5c7e-44f6-a5d7-5b42a8f55db6-kube-api-access-6crjt\") pod \"cert-manager-operator-controller-manager-64cf6dff88-z5lxp\" (UID: \"12384261-5c7e-44f6-a5d7-5b42a8f55db6\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.297178 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" Nov 28 16:27:01 crc kubenswrapper[4909]: I1128 16:27:01.876908 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp"] Nov 28 16:27:02 crc kubenswrapper[4909]: I1128 16:27:02.150082 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" event={"ID":"12384261-5c7e-44f6-a5d7-5b42a8f55db6","Type":"ContainerStarted","Data":"b16e737f3ced2642b2204dc2cf94ad86ad7979fe869f0e7339a561bcbdc77c23"} Nov 28 16:27:11 crc kubenswrapper[4909]: I1128 16:27:11.210427 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" event={"ID":"12384261-5c7e-44f6-a5d7-5b42a8f55db6","Type":"ContainerStarted","Data":"8215bdaa2b8d2227f8785d48d1930febb4d9e8c73368113892b26e43e61eee8d"} Nov 28 16:27:11 crc kubenswrapper[4909]: I1128 16:27:11.239687 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-z5lxp" podStartSLOduration=2.861736208 podStartE2EDuration="11.239644617s" podCreationTimestamp="2025-11-28 16:27:00 +0000 UTC" firstStartedPulling="2025-11-28 16:27:01.890613154 +0000 UTC m=+1004.287297678" lastFinishedPulling="2025-11-28 16:27:10.268521563 +0000 UTC m=+1012.665206087" observedRunningTime="2025-11-28 16:27:11.232752584 +0000 UTC m=+1013.629437108" watchObservedRunningTime="2025-11-28 16:27:11.239644617 +0000 UTC m=+1013.636329151" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.145086 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-tl4xj"] Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.146639 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.149444 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.149893 4909 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-r4279" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.155622 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.162323 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-tl4xj"] Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.258605 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.258750 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj25v\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-kube-api-access-mj25v\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.360622 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj25v\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-kube-api-access-mj25v\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.360725 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.389067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj25v\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-kube-api-access-mj25v\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.390189 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6bcf5976-b2b7-47b5-aa2a-7010dff6a42c-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-tl4xj\" (UID: \"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:14 crc kubenswrapper[4909]: I1128 16:27:14.462335 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.054623 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-tl4xj"] Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.166349 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-274mv"] Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.167520 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.177026 4909 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-r78r4" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.187754 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-274mv"] Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.240922 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" event={"ID":"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c","Type":"ContainerStarted","Data":"1b12a7bef523ce7be6f22f7bea3afae2121f90550a2f4fb0be16c42edf4e3ae8"} Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.293891 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.293943 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4rc7\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-kube-api-access-b4rc7\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.395402 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.395467 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4rc7\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-kube-api-access-b4rc7\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.412029 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.412098 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4rc7\" (UniqueName: \"kubernetes.io/projected/37a96393-8175-4ec6-bf57-d02fd13cc257-kube-api-access-b4rc7\") pod \"cert-manager-cainjector-855d9ccff4-274mv\" (UID: \"37a96393-8175-4ec6-bf57-d02fd13cc257\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.506821 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" Nov 28 16:27:15 crc kubenswrapper[4909]: I1128 16:27:15.744378 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-274mv"] Nov 28 16:27:16 crc kubenswrapper[4909]: I1128 16:27:16.247956 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" event={"ID":"37a96393-8175-4ec6-bf57-d02fd13cc257","Type":"ContainerStarted","Data":"c61ff88572c1a74a8599d70bb66bb573bb34b66f1b52e97596ca45231665ee41"} Nov 28 16:27:19 crc kubenswrapper[4909]: I1128 16:27:19.911851 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:27:19 crc kubenswrapper[4909]: I1128 16:27:19.912430 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.236912 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-xk9wg"] Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.238568 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.240374 4909 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-p6zkp" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.256227 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-xk9wg"] Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.318947 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" event={"ID":"6bcf5976-b2b7-47b5-aa2a-7010dff6a42c","Type":"ContainerStarted","Data":"6a2ab0c1d2482bb75308e5fef11a4f770aa03100cfb44c35a1a79e23a6b8a7b5"} Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.319032 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.320285 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" event={"ID":"37a96393-8175-4ec6-bf57-d02fd13cc257","Type":"ContainerStarted","Data":"a9df6fa89fe8f6ee0b27c5fa6bf5fe7f4d97de328fb667c02ab9fbaea7d6df8b"} Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.322551 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5q9lp\" (UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-kube-api-access-5q9lp\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.322617 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-bound-sa-token\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.335745 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" podStartSLOduration=1.306461337 podStartE2EDuration="10.335725507s" podCreationTimestamp="2025-11-28 16:27:14 +0000 UTC" firstStartedPulling="2025-11-28 16:27:15.064433736 +0000 UTC m=+1017.461118260" lastFinishedPulling="2025-11-28 16:27:24.093697906 +0000 UTC m=+1026.490382430" observedRunningTime="2025-11-28 16:27:24.332314176 +0000 UTC m=+1026.728998710" watchObservedRunningTime="2025-11-28 16:27:24.335725507 +0000 UTC m=+1026.732410041" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.347646 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-274mv" podStartSLOduration=0.986846641 podStartE2EDuration="9.347624584s" podCreationTimestamp="2025-11-28 16:27:15 +0000 UTC" firstStartedPulling="2025-11-28 16:27:15.750306017 +0000 UTC m=+1018.146990541" lastFinishedPulling="2025-11-28 16:27:24.11108396 +0000 UTC m=+1026.507768484" observedRunningTime="2025-11-28 16:27:24.344856801 +0000 UTC m=+1026.741541325" watchObservedRunningTime="2025-11-28 16:27:24.347624584 +0000 UTC m=+1026.744309108" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.424252 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5q9lp\" 
(UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-kube-api-access-5q9lp\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.424325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-bound-sa-token\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.442615 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-bound-sa-token\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.442692 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5q9lp\" (UniqueName: \"kubernetes.io/projected/4c9b7c14-6c6d-4e55-bacc-1e2f3309e069-kube-api-access-5q9lp\") pod \"cert-manager-86cb77c54b-xk9wg\" (UID: \"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069\") " pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.557206 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-xk9wg" Nov 28 16:27:24 crc kubenswrapper[4909]: I1128 16:27:24.987202 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-xk9wg"] Nov 28 16:27:24 crc kubenswrapper[4909]: W1128 16:27:24.994508 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c9b7c14_6c6d_4e55_bacc_1e2f3309e069.slice/crio-d079fc0c8ce41b8b68769cb302f56d0e9947bb34e3bbb67654e9b84a52f24ccd WatchSource:0}: Error finding container d079fc0c8ce41b8b68769cb302f56d0e9947bb34e3bbb67654e9b84a52f24ccd: Status 404 returned error can't find the container with id d079fc0c8ce41b8b68769cb302f56d0e9947bb34e3bbb67654e9b84a52f24ccd Nov 28 16:27:25 crc kubenswrapper[4909]: I1128 16:27:25.326388 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-xk9wg" event={"ID":"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069","Type":"ContainerStarted","Data":"d079fc0c8ce41b8b68769cb302f56d0e9947bb34e3bbb67654e9b84a52f24ccd"} Nov 28 16:27:28 crc kubenswrapper[4909]: I1128 16:27:28.350439 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-xk9wg" event={"ID":"4c9b7c14-6c6d-4e55-bacc-1e2f3309e069","Type":"ContainerStarted","Data":"feb0ed184d2dd3fe4422f3098d59df6c530a0bfd13ae18f64aab59c0c86b839c"} Nov 28 16:27:28 crc kubenswrapper[4909]: I1128 16:27:28.370351 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-xk9wg" podStartSLOduration=4.370329617 podStartE2EDuration="4.370329617s" podCreationTimestamp="2025-11-28 16:27:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:27:28.369530906 +0000 UTC m=+1030.766215490" watchObservedRunningTime="2025-11-28 16:27:28.370329617 +0000 UTC m=+1030.767014141" Nov 28 16:27:29 crc 
kubenswrapper[4909]: I1128 16:27:29.465916 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-tl4xj" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.703840 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.705003 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.707898 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-nhjdc" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.712186 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.712276 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.732611 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.843751 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4zxd\" (UniqueName: \"kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd\") pod \"openstack-operator-index-px5xk\" (UID: \"3905efb8-833b-42a5-a087-1bb3fd25a70c\") " pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.945484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4zxd\" (UniqueName: \"kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd\") pod \"openstack-operator-index-px5xk\" (UID: \"3905efb8-833b-42a5-a087-1bb3fd25a70c\") " pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:32 crc kubenswrapper[4909]: I1128 16:27:32.974118 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4zxd\" (UniqueName: \"kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd\") pod \"openstack-operator-index-px5xk\" (UID: \"3905efb8-833b-42a5-a087-1bb3fd25a70c\") " pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:33 crc kubenswrapper[4909]: I1128 16:27:33.042920 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:33 crc kubenswrapper[4909]: I1128 16:27:33.533356 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:33 crc kubenswrapper[4909]: W1128 16:27:33.540023 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3905efb8_833b_42a5_a087_1bb3fd25a70c.slice/crio-0df09ae11f8d983c5ffddd989160ba836b3b784d7f3a8d364626005227772d03 WatchSource:0}: Error finding container 0df09ae11f8d983c5ffddd989160ba836b3b784d7f3a8d364626005227772d03: Status 404 returned error can't find the container with id 0df09ae11f8d983c5ffddd989160ba836b3b784d7f3a8d364626005227772d03 Nov 28 16:27:34 crc kubenswrapper[4909]: I1128 16:27:34.392008 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-px5xk" event={"ID":"3905efb8-833b-42a5-a087-1bb3fd25a70c","Type":"ContainerStarted","Data":"0df09ae11f8d983c5ffddd989160ba836b3b784d7f3a8d364626005227772d03"} Nov 28 16:27:36 crc kubenswrapper[4909]: I1128 16:27:36.404935 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-px5xk" event={"ID":"3905efb8-833b-42a5-a087-1bb3fd25a70c","Type":"ContainerStarted","Data":"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7"} Nov 28 16:27:37 crc kubenswrapper[4909]: I1128 16:27:37.264764 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:37 crc kubenswrapper[4909]: I1128 16:27:37.432445 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-px5xk" podStartSLOduration=2.7846114159999997 podStartE2EDuration="5.432425632s" podCreationTimestamp="2025-11-28 16:27:32 +0000 UTC" firstStartedPulling="2025-11-28 16:27:33.542298983 +0000 UTC m=+1035.938983537" lastFinishedPulling="2025-11-28 16:27:36.190113209 +0000 UTC m=+1038.586797753" observedRunningTime="2025-11-28 16:27:37.430209113 +0000 UTC m=+1039.826893637" watchObservedRunningTime="2025-11-28 16:27:37.432425632 +0000 UTC m=+1039.829110166" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.268548 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-prsdg"] Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.269267 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.281515 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-prsdg"] Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.334026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77b66\" (UniqueName: \"kubernetes.io/projected/4d9f796e-bf71-47e0-a15b-13f652dd03b5-kube-api-access-77b66\") pod \"openstack-operator-index-prsdg\" (UID: \"4d9f796e-bf71-47e0-a15b-13f652dd03b5\") " pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.416245 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-px5xk" podUID="3905efb8-833b-42a5-a087-1bb3fd25a70c" containerName="registry-server" containerID="cri-o://5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7" gracePeriod=2 Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.434949 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77b66\" (UniqueName: \"kubernetes.io/projected/4d9f796e-bf71-47e0-a15b-13f652dd03b5-kube-api-access-77b66\") pod \"openstack-operator-index-prsdg\" (UID: \"4d9f796e-bf71-47e0-a15b-13f652dd03b5\") " pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.458451 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77b66\" (UniqueName: \"kubernetes.io/projected/4d9f796e-bf71-47e0-a15b-13f652dd03b5-kube-api-access-77b66\") pod \"openstack-operator-index-prsdg\" (UID: \"4d9f796e-bf71-47e0-a15b-13f652dd03b5\") " pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.593814 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.805756 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.839608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4zxd\" (UniqueName: \"kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd\") pod \"3905efb8-833b-42a5-a087-1bb3fd25a70c\" (UID: \"3905efb8-833b-42a5-a087-1bb3fd25a70c\") " Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.845074 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd" (OuterVolumeSpecName: "kube-api-access-v4zxd") pod "3905efb8-833b-42a5-a087-1bb3fd25a70c" (UID: "3905efb8-833b-42a5-a087-1bb3fd25a70c"). InnerVolumeSpecName "kube-api-access-v4zxd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.846608 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-prsdg"] Nov 28 16:27:38 crc kubenswrapper[4909]: W1128 16:27:38.847721 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d9f796e_bf71_47e0_a15b_13f652dd03b5.slice/crio-35d36bb0e3f147c54b731040e769a7479409fda8a4ff3c0d93132e7a1e81bfe1 WatchSource:0}: Error finding container 35d36bb0e3f147c54b731040e769a7479409fda8a4ff3c0d93132e7a1e81bfe1: Status 404 returned error can't find the container with id 35d36bb0e3f147c54b731040e769a7479409fda8a4ff3c0d93132e7a1e81bfe1 Nov 28 16:27:38 crc kubenswrapper[4909]: I1128 16:27:38.941670 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4zxd\" (UniqueName: \"kubernetes.io/projected/3905efb8-833b-42a5-a087-1bb3fd25a70c-kube-api-access-v4zxd\") on node \"crc\" DevicePath \"\"" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.422370 4909 generic.go:334] "Generic (PLEG): container finished" podID="3905efb8-833b-42a5-a087-1bb3fd25a70c" containerID="5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7" exitCode=0 Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.422440 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-px5xk" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.422428 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-px5xk" event={"ID":"3905efb8-833b-42a5-a087-1bb3fd25a70c","Type":"ContainerDied","Data":"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7"} Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.422646 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-px5xk" event={"ID":"3905efb8-833b-42a5-a087-1bb3fd25a70c","Type":"ContainerDied","Data":"0df09ae11f8d983c5ffddd989160ba836b3b784d7f3a8d364626005227772d03"} Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.422734 4909 scope.go:117] "RemoveContainer" containerID="5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.423801 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-prsdg" event={"ID":"4d9f796e-bf71-47e0-a15b-13f652dd03b5","Type":"ContainerStarted","Data":"2561e92ac30904987a63ea35fa5b480a5cc07d594d56432f4c96dc23d9ca771f"} Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.423839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-prsdg" event={"ID":"4d9f796e-bf71-47e0-a15b-13f652dd03b5","Type":"ContainerStarted","Data":"35d36bb0e3f147c54b731040e769a7479409fda8a4ff3c0d93132e7a1e81bfe1"} Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.437850 4909 scope.go:117] "RemoveContainer" containerID="5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7" Nov 28 16:27:39 crc kubenswrapper[4909]: E1128 16:27:39.438920 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7\": container with ID starting with 5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7 not found: ID does not 
exist" containerID="5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.438962 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7"} err="failed to get container status \"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7\": rpc error: code = NotFound desc = could not find container \"5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7\": container with ID starting with 5fdec330b5809c5a84f2907b1daa8dbe33185cfdac62a743eaaef6bf8df63fd7 not found: ID does not exist" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.452411 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-prsdg" podStartSLOduration=1.406455197 podStartE2EDuration="1.452394452s" podCreationTimestamp="2025-11-28 16:27:38 +0000 UTC" firstStartedPulling="2025-11-28 16:27:38.85951452 +0000 UTC m=+1041.256199044" lastFinishedPulling="2025-11-28 16:27:38.905453775 +0000 UTC m=+1041.302138299" observedRunningTime="2025-11-28 16:27:39.451758035 +0000 UTC m=+1041.848442569" watchObservedRunningTime="2025-11-28 16:27:39.452394452 +0000 UTC m=+1041.849078976" Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.469977 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.473787 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-px5xk"] Nov 28 16:27:39 crc kubenswrapper[4909]: I1128 16:27:39.911416 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3905efb8-833b-42a5-a087-1bb3fd25a70c" path="/var/lib/kubelet/pods/3905efb8-833b-42a5-a087-1bb3fd25a70c/volumes" Nov 28 16:27:48 crc kubenswrapper[4909]: I1128 16:27:48.594575 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:48 crc kubenswrapper[4909]: I1128 16:27:48.595175 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:48 crc kubenswrapper[4909]: I1128 16:27:48.632768 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.515860 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-prsdg" Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.911722 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.911816 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.917006 4909 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.917786 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:27:49 crc kubenswrapper[4909]: I1128 16:27:49.917926 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e" gracePeriod=600 Nov 28 16:27:50 crc kubenswrapper[4909]: I1128 16:27:50.500432 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e" exitCode=0 Nov 28 16:27:50 crc kubenswrapper[4909]: I1128 16:27:50.500911 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e"} Nov 28 16:27:50 crc kubenswrapper[4909]: I1128 16:27:50.501002 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd"} Nov 28 16:27:50 crc kubenswrapper[4909]: I1128 16:27:50.501041 4909 scope.go:117] "RemoveContainer" containerID="385cb21d269057f04b94911a7382004d4c8760b6e581a733ba61cea01c0b4b65" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.508358 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5"] Nov 28 16:27:52 crc kubenswrapper[4909]: E1128 16:27:52.509053 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3905efb8-833b-42a5-a087-1bb3fd25a70c" containerName="registry-server" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.509065 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3905efb8-833b-42a5-a087-1bb3fd25a70c" containerName="registry-server" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.509173 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3905efb8-833b-42a5-a087-1bb3fd25a70c" containerName="registry-server" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.509930 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.512033 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-lvh26" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.533146 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5"] Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.536410 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.536759 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.536850 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8spg\" (UniqueName: \"kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.648908 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.649272 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8spg\" (UniqueName: \"kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.649443 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.651104 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.651149 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.671335 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8spg\" (UniqueName: \"kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:52 crc kubenswrapper[4909]: I1128 16:27:52.835264 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:53 crc kubenswrapper[4909]: I1128 16:27:53.116982 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5"] Nov 28 16:27:53 crc kubenswrapper[4909]: W1128 16:27:53.127274 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e08c93_a869_4fc1_a497_681cb10fdab3.slice/crio-891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367 WatchSource:0}: Error finding container 891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367: Status 404 returned error can't find the container with id 891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367 Nov 28 16:27:53 crc kubenswrapper[4909]: I1128 16:27:53.524983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerStarted","Data":"d18b6185180f3fcbed44b94a298fbbd169b8c6f99a5a4c2593a1a27d1076fcf5"} Nov 28 16:27:53 crc kubenswrapper[4909]: I1128 16:27:53.525369 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerStarted","Data":"891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367"} Nov 28 16:27:54 crc kubenswrapper[4909]: I1128 16:27:54.532932 4909 generic.go:334] "Generic (PLEG): container finished" podID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerID="d18b6185180f3fcbed44b94a298fbbd169b8c6f99a5a4c2593a1a27d1076fcf5" exitCode=0 Nov 28 16:27:54 crc kubenswrapper[4909]: I1128 16:27:54.532992 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerDied","Data":"d18b6185180f3fcbed44b94a298fbbd169b8c6f99a5a4c2593a1a27d1076fcf5"} Nov 28 16:27:56 crc kubenswrapper[4909]: 
I1128 16:27:56.549437 4909 generic.go:334] "Generic (PLEG): container finished" podID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerID="b01e2b1b9b79b383481423ca4e180fd088d84989384f5338df4f71e316f62eb0" exitCode=0 Nov 28 16:27:56 crc kubenswrapper[4909]: I1128 16:27:56.549507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerDied","Data":"b01e2b1b9b79b383481423ca4e180fd088d84989384f5338df4f71e316f62eb0"} Nov 28 16:27:58 crc kubenswrapper[4909]: I1128 16:27:58.570886 4909 generic.go:334] "Generic (PLEG): container finished" podID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerID="d7c442e5e05c2e7d3fcf3f5710426ead5ff1e5ad9bd32037d86a157c41727ebf" exitCode=0 Nov 28 16:27:58 crc kubenswrapper[4909]: I1128 16:27:58.570954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerDied","Data":"d7c442e5e05c2e7d3fcf3f5710426ead5ff1e5ad9bd32037d86a157c41727ebf"} Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.838642 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.958680 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle\") pod \"f1e08c93-a869-4fc1-a497-681cb10fdab3\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.958790 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util\") pod \"f1e08c93-a869-4fc1-a497-681cb10fdab3\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.958851 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8spg\" (UniqueName: \"kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg\") pod \"f1e08c93-a869-4fc1-a497-681cb10fdab3\" (UID: \"f1e08c93-a869-4fc1-a497-681cb10fdab3\") " Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.959901 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle" (OuterVolumeSpecName: "bundle") pod "f1e08c93-a869-4fc1-a497-681cb10fdab3" (UID: "f1e08c93-a869-4fc1-a497-681cb10fdab3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.980611 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg" (OuterVolumeSpecName: "kube-api-access-t8spg") pod "f1e08c93-a869-4fc1-a497-681cb10fdab3" (UID: "f1e08c93-a869-4fc1-a497-681cb10fdab3"). InnerVolumeSpecName "kube-api-access-t8spg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:27:59 crc kubenswrapper[4909]: I1128 16:27:59.983050 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util" (OuterVolumeSpecName: "util") pod "f1e08c93-a869-4fc1-a497-681cb10fdab3" (UID: "f1e08c93-a869-4fc1-a497-681cb10fdab3"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.060561 4909 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.060607 4909 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1e08c93-a869-4fc1-a497-681cb10fdab3-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.060621 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8spg\" (UniqueName: \"kubernetes.io/projected/f1e08c93-a869-4fc1-a497-681cb10fdab3-kube-api-access-t8spg\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.588366 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" event={"ID":"f1e08c93-a869-4fc1-a497-681cb10fdab3","Type":"ContainerDied","Data":"891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367"} Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.588426 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="891c12e93166b3fed27212bf21828747ba87787152e81f7bd5936f5cd3e7e367" Nov 28 16:28:00 crc kubenswrapper[4909]: I1128 16:28:00.588492 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.312231 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm"] Nov 28 16:28:02 crc kubenswrapper[4909]: E1128 16:28:02.312824 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="pull" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.312840 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="pull" Nov 28 16:28:02 crc kubenswrapper[4909]: E1128 16:28:02.312855 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="util" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.312864 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="util" Nov 28 16:28:02 crc kubenswrapper[4909]: E1128 16:28:02.312892 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="extract" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.312900 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="extract" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.313045 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e08c93-a869-4fc1-a497-681cb10fdab3" containerName="extract" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.313569 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.316081 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-thqjx" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.346372 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm"] Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.389094 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpfvj\" (UniqueName: \"kubernetes.io/projected/bdf3a989-5fba-461f-b8ed-82ed0bd1ece7-kube-api-access-qpfvj\") pod \"openstack-operator-controller-operator-6fcddf5ccf-lkhfm\" (UID: \"bdf3a989-5fba-461f-b8ed-82ed0bd1ece7\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.490421 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpfvj\" (UniqueName: \"kubernetes.io/projected/bdf3a989-5fba-461f-b8ed-82ed0bd1ece7-kube-api-access-qpfvj\") pod \"openstack-operator-controller-operator-6fcddf5ccf-lkhfm\" (UID: \"bdf3a989-5fba-461f-b8ed-82ed0bd1ece7\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.507843 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpfvj\" (UniqueName: \"kubernetes.io/projected/bdf3a989-5fba-461f-b8ed-82ed0bd1ece7-kube-api-access-qpfvj\") pod \"openstack-operator-controller-operator-6fcddf5ccf-lkhfm\" 
(UID: \"bdf3a989-5fba-461f-b8ed-82ed0bd1ece7\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:02 crc kubenswrapper[4909]: I1128 16:28:02.632617 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:03 crc kubenswrapper[4909]: I1128 16:28:03.060949 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm"] Nov 28 16:28:03 crc kubenswrapper[4909]: I1128 16:28:03.610069 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" event={"ID":"bdf3a989-5fba-461f-b8ed-82ed0bd1ece7","Type":"ContainerStarted","Data":"55349aad2c0b8e60b932ecbd38abafe6d0fc879c999dda164b0d27d161bc890a"} Nov 28 16:28:07 crc kubenswrapper[4909]: I1128 16:28:07.649615 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" event={"ID":"bdf3a989-5fba-461f-b8ed-82ed0bd1ece7","Type":"ContainerStarted","Data":"b5f5efa7a295b01d0994db390e531aa481488f4f81306e07fa9aec720fa1010b"} Nov 28 16:28:07 crc kubenswrapper[4909]: I1128 16:28:07.650780 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:07 crc kubenswrapper[4909]: I1128 16:28:07.680201 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" podStartSLOduration=1.537244311 podStartE2EDuration="5.680184629s" podCreationTimestamp="2025-11-28 16:28:02 +0000 UTC" firstStartedPulling="2025-11-28 16:28:03.068441715 +0000 UTC m=+1065.465126239" lastFinishedPulling="2025-11-28 16:28:07.211382033 +0000 UTC m=+1069.608066557" observedRunningTime="2025-11-28 16:28:07.679436509 +0000 UTC m=+1070.076121033" watchObservedRunningTime="2025-11-28 16:28:07.680184629 +0000 UTC m=+1070.076869153" Nov 28 16:28:12 crc kubenswrapper[4909]: I1128 16:28:12.636097 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-lkhfm" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.577862 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.579634 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.584274 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-jz6h8" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.599551 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.611631 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.612940 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.617696 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.617894 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-5k9ph" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.622244 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.623448 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.625955 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-2c66j" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.627922 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.654268 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.655372 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.657086 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-zcffx" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.669342 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdszc\" (UniqueName: \"kubernetes.io/projected/facf7553-0ba6-43b6-b720-0345f63d5706-kube-api-access-kdszc\") pod \"barbican-operator-controller-manager-7d9dfd778-v8ggq\" (UID: \"facf7553-0ba6-43b6-b720-0345f63d5706\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.679459 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.680759 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.683330 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-d854m" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.690419 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.691944 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.694754 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fxt86" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.704895 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.712735 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.717736 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.726747 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.727720 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.736228 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-h76v9" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.736402 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.736491 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.737441 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.741710 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-s67zv" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.766758 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.772964 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgnxh\" (UniqueName: \"kubernetes.io/projected/5fbd4b3d-b059-4780-b1f1-f04e00a9a90e-kube-api-access-tgnxh\") pod \"designate-operator-controller-manager-78b4bc895b-52zmw\" (UID: \"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.773018 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z6qs\" (UniqueName: \"kubernetes.io/projected/5efbc774-20bd-4b16-a9dd-2584462dad47-kube-api-access-8z6qs\") pod \"heat-operator-controller-manager-5f64f6f8bb-k5bt2\" (UID: \"5efbc774-20bd-4b16-a9dd-2584462dad47\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.773117 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5zmv\" (UniqueName: \"kubernetes.io/projected/906bccad-3e05-4f36-9ecc-57627d2fb226-kube-api-access-l5zmv\") pod \"horizon-operator-controller-manager-68c6d99b8f-zjfzk\" (UID: \"906bccad-3e05-4f36-9ecc-57627d2fb226\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.773153 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shtgz\" (UniqueName: \"kubernetes.io/projected/4081f6af-5e99-4383-b12a-654d6c1419d8-kube-api-access-shtgz\") pod \"glance-operator-controller-manager-668d9c48b9-ggchx\" (UID: \"4081f6af-5e99-4383-b12a-654d6c1419d8\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.773195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdszc\" (UniqueName: \"kubernetes.io/projected/facf7553-0ba6-43b6-b720-0345f63d5706-kube-api-access-kdszc\") pod \"barbican-operator-controller-manager-7d9dfd778-v8ggq\" (UID: \"facf7553-0ba6-43b6-b720-0345f63d5706\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.773237 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cbkt\" (UniqueName: \"kubernetes.io/projected/5e7d8f53-01bb-407c-8897-ceff90567e28-kube-api-access-7cbkt\") pod \"cinder-operator-controller-manager-859b6ccc6-cdlxz\" (UID: \"5e7d8f53-01bb-407c-8897-ceff90567e28\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.780843 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl"] 
Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.819210 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.823795 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdszc\" (UniqueName: \"kubernetes.io/projected/facf7553-0ba6-43b6-b720-0345f63d5706-kube-api-access-kdszc\") pod \"barbican-operator-controller-manager-7d9dfd778-v8ggq\" (UID: \"facf7553-0ba6-43b6-b720-0345f63d5706\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.827454 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.829263 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.830514 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.834701 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.835018 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-tzhvt" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.835278 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-28sw4" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.835636 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.839064 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-gvrdd" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.849535 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.850917 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.854470 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-8zjb6" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.854639 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874260 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874851 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgnxh\" (UniqueName: \"kubernetes.io/projected/5fbd4b3d-b059-4780-b1f1-f04e00a9a90e-kube-api-access-tgnxh\") pod \"designate-operator-controller-manager-78b4bc895b-52zmw\" (UID: \"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z6qs\" (UniqueName: \"kubernetes.io/projected/5efbc774-20bd-4b16-a9dd-2584462dad47-kube-api-access-8z6qs\") pod \"heat-operator-controller-manager-5f64f6f8bb-k5bt2\" (UID: \"5efbc774-20bd-4b16-a9dd-2584462dad47\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874904 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwfms\" (UniqueName: \"kubernetes.io/projected/d4d007c0-4c50-4550-b0ec-829717b7aa37-kube-api-access-bwfms\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874955 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5zmv\" (UniqueName: \"kubernetes.io/projected/906bccad-3e05-4f36-9ecc-57627d2fb226-kube-api-access-l5zmv\") pod \"horizon-operator-controller-manager-68c6d99b8f-zjfzk\" (UID: \"906bccad-3e05-4f36-9ecc-57627d2fb226\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.874977 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shtgz\" (UniqueName: \"kubernetes.io/projected/4081f6af-5e99-4383-b12a-654d6c1419d8-kube-api-access-shtgz\") pod \"glance-operator-controller-manager-668d9c48b9-ggchx\" (UID: \"4081f6af-5e99-4383-b12a-654d6c1419d8\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.875011 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6t6s\" (UniqueName: \"kubernetes.io/projected/8b35e013-9a3e-4434-9603-e7fbd95f2dca-kube-api-access-r6t6s\") pod \"ironic-operator-controller-manager-6c548fd776-zqxvl\" (UID: \"8b35e013-9a3e-4434-9603-e7fbd95f2dca\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 
16:28:31.875036 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cbkt\" (UniqueName: \"kubernetes.io/projected/5e7d8f53-01bb-407c-8897-ceff90567e28-kube-api-access-7cbkt\") pod \"cinder-operator-controller-manager-859b6ccc6-cdlxz\" (UID: \"5e7d8f53-01bb-407c-8897-ceff90567e28\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.875069 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.881740 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.885812 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.890360 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.891577 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.894821 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4"] Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.910891 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-b6h8d" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.922182 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.931923 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5zmv\" (UniqueName: \"kubernetes.io/projected/906bccad-3e05-4f36-9ecc-57627d2fb226-kube-api-access-l5zmv\") pod \"horizon-operator-controller-manager-68c6d99b8f-zjfzk\" (UID: \"906bccad-3e05-4f36-9ecc-57627d2fb226\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.932801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgnxh\" (UniqueName: \"kubernetes.io/projected/5fbd4b3d-b059-4780-b1f1-f04e00a9a90e-kube-api-access-tgnxh\") pod \"designate-operator-controller-manager-78b4bc895b-52zmw\" (UID: \"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.938607 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cbkt\" (UniqueName: \"kubernetes.io/projected/5e7d8f53-01bb-407c-8897-ceff90567e28-kube-api-access-7cbkt\") pod \"cinder-operator-controller-manager-859b6ccc6-cdlxz\" (UID: \"5e7d8f53-01bb-407c-8897-ceff90567e28\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.971440 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.976116 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z6qs\" (UniqueName: \"kubernetes.io/projected/5efbc774-20bd-4b16-a9dd-2584462dad47-kube-api-access-8z6qs\") pod \"heat-operator-controller-manager-5f64f6f8bb-k5bt2\" (UID: \"5efbc774-20bd-4b16-a9dd-2584462dad47\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.977713 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980113 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk6p9\" (UniqueName: \"kubernetes.io/projected/f5f666e9-9715-444d-a4ec-f6b3bb719df6-kube-api-access-rk6p9\") pod \"keystone-operator-controller-manager-546d4bdf48-wsrnd\" (UID: \"f5f666e9-9715-444d-a4ec-f6b3bb719df6\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980170 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6t6s\" (UniqueName: \"kubernetes.io/projected/8b35e013-9a3e-4434-9603-e7fbd95f2dca-kube-api-access-r6t6s\") pod \"ironic-operator-controller-manager-6c548fd776-zqxvl\" (UID: \"8b35e013-9a3e-4434-9603-e7fbd95f2dca\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980267 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n6vn\" (UniqueName: \"kubernetes.io/projected/dc2bc53b-f803-4f9b-943f-e53f132cbb39-kube-api-access-6n6vn\") pod \"nova-operator-controller-manager-697bc559fc-xnnf4\" (UID: \"dc2bc53b-f803-4f9b-943f-e53f132cbb39\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980284 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwfms\" (UniqueName: \"kubernetes.io/projected/d4d007c0-4c50-4550-b0ec-829717b7aa37-kube-api-access-bwfms\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980325 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x886\" (UniqueName: \"kubernetes.io/projected/ede8d830-fccd-4337-b0ec-48030a263d44-kube-api-access-6x886\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-sqpxn\" (UID: \"ede8d830-fccd-4337-b0ec-48030a263d44\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980384 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm7xf\" (UniqueName: \"kubernetes.io/projected/735f57d0-1df6-4773-9e9f-2f2745d307d0-kube-api-access-cm7xf\") pod \"manila-operator-controller-manager-6546668bfd-t5cvm\" (UID: \"735f57d0-1df6-4773-9e9f-2f2745d307d0\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.980414 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnxmb\" (UniqueName: 
\"kubernetes.io/projected/b8299ad0-0617-430d-9b0f-066022a5679f-kube-api-access-dnxmb\") pod \"mariadb-operator-controller-manager-56bbcc9d85-5r95f\" (UID: \"b8299ad0-0617-430d-9b0f-066022a5679f\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:28:31 crc kubenswrapper[4909]: E1128 16:28:31.980904 4909 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:31 crc kubenswrapper[4909]: E1128 16:28:31.980972 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert podName:d4d007c0-4c50-4550-b0ec-829717b7aa37 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:32.48095125 +0000 UTC m=+1094.877635854 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert") pod "infra-operator-controller-manager-57548d458d-7z5zf" (UID: "d4d007c0-4c50-4550-b0ec-829717b7aa37") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:31 crc kubenswrapper[4909]: I1128 16:28:31.986630 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shtgz\" (UniqueName: \"kubernetes.io/projected/4081f6af-5e99-4383-b12a-654d6c1419d8-kube-api-access-shtgz\") pod \"glance-operator-controller-manager-668d9c48b9-ggchx\" (UID: \"4081f6af-5e99-4383-b12a-654d6c1419d8\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.000002 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.001268 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwfms\" (UniqueName: \"kubernetes.io/projected/d4d007c0-4c50-4550-b0ec-829717b7aa37-kube-api-access-bwfms\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.001704 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6t6s\" (UniqueName: \"kubernetes.io/projected/8b35e013-9a3e-4434-9603-e7fbd95f2dca-kube-api-access-r6t6s\") pod \"ironic-operator-controller-manager-6c548fd776-zqxvl\" (UID: \"8b35e013-9a3e-4434-9603-e7fbd95f2dca\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.008806 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.009858 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.011212 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-g6lb5" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.017482 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.020765 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.023940 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.025170 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-42vfk" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.028933 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.030345 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.036914 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-lt9nr" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.043900 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.045254 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.047241 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-2rxgx" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.047410 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.058768 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.065431 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.072985 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.080730 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.081834 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n6vn\" (UniqueName: \"kubernetes.io/projected/dc2bc53b-f803-4f9b-943f-e53f132cbb39-kube-api-access-6n6vn\") pod \"nova-operator-controller-manager-697bc559fc-xnnf4\" (UID: \"dc2bc53b-f803-4f9b-943f-e53f132cbb39\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.081897 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x886\" (UniqueName: \"kubernetes.io/projected/ede8d830-fccd-4337-b0ec-48030a263d44-kube-api-access-6x886\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-sqpxn\" (UID: \"ede8d830-fccd-4337-b0ec-48030a263d44\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.081948 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsw59\" (UniqueName: \"kubernetes.io/projected/5a4d9dc0-67a3-4014-9265-f88dac783bca-kube-api-access-rsw59\") pod \"placement-operator-controller-manager-78f8948974-jlztw\" (UID: \"5a4d9dc0-67a3-4014-9265-f88dac783bca\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.081972 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrk66\" (UniqueName: \"kubernetes.io/projected/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-kube-api-access-wrk66\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082000 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6b5k\" (UniqueName: \"kubernetes.io/projected/32bd97d0-184a-4f85-bfdc-1b34688753a5-kube-api-access-d6b5k\") pod 
\"octavia-operator-controller-manager-998648c74-5mfq8\" (UID: \"32bd97d0-184a-4f85-bfdc-1b34688753a5\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082027 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm7xf\" (UniqueName: \"kubernetes.io/projected/735f57d0-1df6-4773-9e9f-2f2745d307d0-kube-api-access-cm7xf\") pod \"manila-operator-controller-manager-6546668bfd-t5cvm\" (UID: \"735f57d0-1df6-4773-9e9f-2f2745d307d0\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082052 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnxmb\" (UniqueName: \"kubernetes.io/projected/b8299ad0-0617-430d-9b0f-066022a5679f-kube-api-access-dnxmb\") pod \"mariadb-operator-controller-manager-56bbcc9d85-5r95f\" (UID: \"b8299ad0-0617-430d-9b0f-066022a5679f\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082072 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk6p9\" (UniqueName: \"kubernetes.io/projected/f5f666e9-9715-444d-a4ec-f6b3bb719df6-kube-api-access-rk6p9\") pod \"keystone-operator-controller-manager-546d4bdf48-wsrnd\" (UID: \"f5f666e9-9715-444d-a4ec-f6b3bb719df6\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082092 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.082136 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r7gr\" (UniqueName: \"kubernetes.io/projected/108b7264-8189-4234-a458-ba48a0c28123-kube-api-access-9r7gr\") pod \"ovn-operator-controller-manager-b6456fdb6-x28l4\" (UID: \"108b7264-8189-4234-a458-ba48a0c28123\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.088813 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.092893 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.093942 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.095449 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-22l8b" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.105254 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x886\" (UniqueName: \"kubernetes.io/projected/ede8d830-fccd-4337-b0ec-48030a263d44-kube-api-access-6x886\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-sqpxn\" (UID: \"ede8d830-fccd-4337-b0ec-48030a263d44\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.110153 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk6p9\" (UniqueName: \"kubernetes.io/projected/f5f666e9-9715-444d-a4ec-f6b3bb719df6-kube-api-access-rk6p9\") pod \"keystone-operator-controller-manager-546d4bdf48-wsrnd\" (UID: \"f5f666e9-9715-444d-a4ec-f6b3bb719df6\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.110179 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n6vn\" (UniqueName: \"kubernetes.io/projected/dc2bc53b-f803-4f9b-943f-e53f132cbb39-kube-api-access-6n6vn\") pod \"nova-operator-controller-manager-697bc559fc-xnnf4\" (UID: \"dc2bc53b-f803-4f9b-943f-e53f132cbb39\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.110475 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm7xf\" (UniqueName: \"kubernetes.io/projected/735f57d0-1df6-4773-9e9f-2f2745d307d0-kube-api-access-cm7xf\") pod \"manila-operator-controller-manager-6546668bfd-t5cvm\" (UID: \"735f57d0-1df6-4773-9e9f-2f2745d307d0\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.110929 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.113220 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnxmb\" (UniqueName: \"kubernetes.io/projected/b8299ad0-0617-430d-9b0f-066022a5679f-kube-api-access-dnxmb\") pod \"mariadb-operator-controller-manager-56bbcc9d85-5r95f\" (UID: \"b8299ad0-0617-430d-9b0f-066022a5679f\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.121384 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"] Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.126136 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.129101 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2phtk"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.142205 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.185139 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.185348 4909 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.185482 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert podName:f7de33ac-fc6f-4c0c-a22a-0d9919ac6212 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:32.685460161 +0000 UTC m=+1095.082144685 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" (UID: "f7de33ac-fc6f-4c0c-a22a-0d9919ac6212") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.185581 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r7gr\" (UniqueName: \"kubernetes.io/projected/108b7264-8189-4234-a458-ba48a0c28123-kube-api-access-9r7gr\") pod \"ovn-operator-controller-manager-b6456fdb6-x28l4\" (UID: \"108b7264-8189-4234-a458-ba48a0c28123\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.185690 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsw59\" (UniqueName: \"kubernetes.io/projected/5a4d9dc0-67a3-4014-9265-f88dac783bca-kube-api-access-rsw59\") pod \"placement-operator-controller-manager-78f8948974-jlztw\" (UID: \"5a4d9dc0-67a3-4014-9265-f88dac783bca\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.185717 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrk66\" (UniqueName: \"kubernetes.io/projected/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-kube-api-access-wrk66\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.185739 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6b5k\" (UniqueName: \"kubernetes.io/projected/32bd97d0-184a-4f85-bfdc-1b34688753a5-kube-api-access-d6b5k\") pod \"octavia-operator-controller-manager-998648c74-5mfq8\" (UID: \"32bd97d0-184a-4f85-bfdc-1b34688753a5\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.192557 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.198013 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.199523 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.203890 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.213501 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-7ncng"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.214547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r7gr\" (UniqueName: \"kubernetes.io/projected/108b7264-8189-4234-a458-ba48a0c28123-kube-api-access-9r7gr\") pod \"ovn-operator-controller-manager-b6456fdb6-x28l4\" (UID: \"108b7264-8189-4234-a458-ba48a0c28123\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.216471 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6b5k\" (UniqueName: \"kubernetes.io/projected/32bd97d0-184a-4f85-bfdc-1b34688753a5-kube-api-access-d6b5k\") pod \"octavia-operator-controller-manager-998648c74-5mfq8\" (UID: \"32bd97d0-184a-4f85-bfdc-1b34688753a5\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.220214 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrk66\" (UniqueName: \"kubernetes.io/projected/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-kube-api-access-wrk66\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.228531 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsw59\" (UniqueName: \"kubernetes.io/projected/5a4d9dc0-67a3-4014-9265-f88dac783bca-kube-api-access-rsw59\") pod \"placement-operator-controller-manager-78f8948974-jlztw\" (UID: \"5a4d9dc0-67a3-4014-9265-f88dac783bca\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.266354 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.267816 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.270282 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-l64bz"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.275604 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.279250 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.286980 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d5wt\" (UniqueName: \"kubernetes.io/projected/9024361d-e7dd-4f11-b22f-4027e31bd0ae-kube-api-access-5d5wt\") pod \"swift-operator-controller-manager-5f8c65bbfc-hbnt7\" (UID: \"9024361d-e7dd-4f11-b22f-4027e31bd0ae\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.287044 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjcrx\" (UniqueName: \"kubernetes.io/projected/771c530f-d0ab-412d-a6c6-931999bc878f-kube-api-access-gjcrx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-9vnqw\" (UID: \"771c530f-d0ab-412d-a6c6-931999bc878f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.287331 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.303014 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.306247 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.313898 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.313911 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.314292 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-swm4q"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.328038 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.348052 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.350732 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.365692 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.374364 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.375238 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.378932 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-4rmz4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.380699 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.388020 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.389423 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjcrx\" (UniqueName: \"kubernetes.io/projected/771c530f-d0ab-412d-a6c6-931999bc878f-kube-api-access-gjcrx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-9vnqw\" (UID: \"771c530f-d0ab-412d-a6c6-931999bc878f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.389642 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7whvg\" (UniqueName: \"kubernetes.io/projected/584e895a-63c3-48db-8207-e89bc9396da7-kube-api-access-7whvg\") pod \"test-operator-controller-manager-5854674fcc-w8bg4\" (UID: \"584e895a-63c3-48db-8207-e89bc9396da7\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.389794 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d5wt\" (UniqueName: \"kubernetes.io/projected/9024361d-e7dd-4f11-b22f-4027e31bd0ae-kube-api-access-5d5wt\") pod \"swift-operator-controller-manager-5f8c65bbfc-hbnt7\" (UID: \"9024361d-e7dd-4f11-b22f-4027e31bd0ae\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.389927 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh4ss\" (UniqueName: \"kubernetes.io/projected/3127edf8-24b9-4170-b677-c625926881a5-kube-api-access-jh4ss\") pod \"watcher-operator-controller-manager-769dc69bc-b8ssv\" (UID: \"3127edf8-24b9-4170-b677-c625926881a5\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.400010 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.410570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjcrx\" (UniqueName: \"kubernetes.io/projected/771c530f-d0ab-412d-a6c6-931999bc878f-kube-api-access-gjcrx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-9vnqw\" (UID: \"771c530f-d0ab-412d-a6c6-931999bc878f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.417068 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d5wt\" (UniqueName: \"kubernetes.io/projected/9024361d-e7dd-4f11-b22f-4027e31bd0ae-kube-api-access-5d5wt\") pod \"swift-operator-controller-manager-5f8c65bbfc-hbnt7\" (UID: \"9024361d-e7dd-4f11-b22f-4027e31bd0ae\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.418025 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.438510 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.456039 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491099 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7whvg\" (UniqueName: \"kubernetes.io/projected/584e895a-63c3-48db-8207-e89bc9396da7-kube-api-access-7whvg\") pod \"test-operator-controller-manager-5854674fcc-w8bg4\" (UID: \"584e895a-63c3-48db-8207-e89bc9396da7\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491152 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491176 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nmpf\" (UniqueName: \"kubernetes.io/projected/4ed1d030-fe78-4211-a585-46c8ae4f419d-kube-api-access-8nmpf\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491208 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8h2l\" (UniqueName: \"kubernetes.io/projected/626fb166-d7e7-424e-bcde-08e77d0c54b1-kube-api-access-s8h2l\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rrfhv\" (UID: \"626fb166-d7e7-424e-bcde-08e77d0c54b1\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491233 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh4ss\" (UniqueName: \"kubernetes.io/projected/3127edf8-24b9-4170-b677-c625926881a5-kube-api-access-jh4ss\") pod \"watcher-operator-controller-manager-769dc69bc-b8ssv\" (UID: \"3127edf8-24b9-4170-b677-c625926881a5\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491275 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.491321 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf"
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.491676 4909 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.491747 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert podName:d4d007c0-4c50-4550-b0ec-829717b7aa37 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:33.491727724 +0000 UTC m=+1095.888412318 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert") pod "infra-operator-controller-manager-57548d458d-7z5zf" (UID: "d4d007c0-4c50-4550-b0ec-829717b7aa37") : secret "infra-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.515483 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7whvg\" (UniqueName: \"kubernetes.io/projected/584e895a-63c3-48db-8207-e89bc9396da7-kube-api-access-7whvg\") pod \"test-operator-controller-manager-5854674fcc-w8bg4\" (UID: \"584e895a-63c3-48db-8207-e89bc9396da7\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.517368 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh4ss\" (UniqueName: \"kubernetes.io/projected/3127edf8-24b9-4170-b677-c625926881a5-kube-api-access-jh4ss\") pod \"watcher-operator-controller-manager-769dc69bc-b8ssv\" (UID: \"3127edf8-24b9-4170-b677-c625926881a5\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.522259 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.585227 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.592221 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nmpf\" (UniqueName: \"kubernetes.io/projected/4ed1d030-fe78-4211-a585-46c8ae4f419d-kube-api-access-8nmpf\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.592264 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8h2l\" (UniqueName: \"kubernetes.io/projected/626fb166-d7e7-424e-bcde-08e77d0c54b1-kube-api-access-s8h2l\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rrfhv\" (UID: \"626fb166-d7e7-424e-bcde-08e77d0c54b1\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.592311 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.592380 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.592487 4909 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.592530 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:33.092517381 +0000 UTC m=+1095.489201905 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "metrics-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.593166 4909 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.593191 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:33.093183209 +0000 UTC m=+1095.489867733 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.613557 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nmpf\" (UniqueName: \"kubernetes.io/projected/4ed1d030-fe78-4211-a585-46c8ae4f419d-kube-api-access-8nmpf\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.615123 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8h2l\" (UniqueName: \"kubernetes.io/projected/626fb166-d7e7-424e-bcde-08e77d0c54b1-kube-api-access-s8h2l\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rrfhv\" (UID: \"626fb166-d7e7-424e-bcde-08e77d0c54b1\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.753958 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.755826 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.755993 4909 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: E1128 16:28:32.756030 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert podName:f7de33ac-fc6f-4c0c-a22a-0d9919ac6212 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:33.756017549 +0000 UTC m=+1096.152702073 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" (UID: "f7de33ac-fc6f-4c0c-a22a-0d9919ac6212") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.927513 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.972597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz"]
Nov 28 16:28:32 crc kubenswrapper[4909]: I1128 16:28:32.994425 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.004622 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.020451 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.165597 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.165832 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.166031 4909 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.166089 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:34.166069558 +0000 UTC m=+1096.562754092 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.166601 4909 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.166636 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:34.166625373 +0000 UTC m=+1096.563309897 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "metrics-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.208819 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.249919 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5f666e9_9715_444d_a4ec_f6b3bb719df6.slice/crio-2f2b468fba09b89368c56a6513360da8ebcf0509f67d24dee688d0e27bf46db1 WatchSource:0}: Error finding container 2f2b468fba09b89368c56a6513360da8ebcf0509f67d24dee688d0e27bf46db1: Status 404 returned error can't find the container with id 2f2b468fba09b89368c56a6513360da8ebcf0509f67d24dee688d0e27bf46db1
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.456237 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.463148 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4081f6af_5e99_4383_b12a_654d6c1419d8.slice/crio-e16e40bd9fbecf1b22f48198a30c49aa5d3163a77dc780e19b3d87d6fc7454b3 WatchSource:0}: Error finding container e16e40bd9fbecf1b22f48198a30c49aa5d3163a77dc780e19b3d87d6fc7454b3: Status 404 returned error can't find the container with id e16e40bd9fbecf1b22f48198a30c49aa5d3163a77dc780e19b3d87d6fc7454b3
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.480353 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.483945 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fbd4b3d_b059_4780_b1f1_f04e00a9a90e.slice/crio-fc150c1c52338bf3337c746d03f1c1931893eefa88635731e92b59b1d1a1c6e3 WatchSource:0}: Error finding container fc150c1c52338bf3337c746d03f1c1931893eefa88635731e92b59b1d1a1c6e3: Status 404 returned error can't find the container with id fc150c1c52338bf3337c746d03f1c1931893eefa88635731e92b59b1d1a1c6e3
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.487139 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.487454 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b35e013_9a3e_4434_9603_e7fbd95f2dca.slice/crio-7d76bca280edb22b820aaca8f45b570f0c99be4a719b60352b9f6f0f46c0d69e WatchSource:0}: Error finding container 7d76bca280edb22b820aaca8f45b570f0c99be4a719b60352b9f6f0f46c0d69e: Status 404 returned error can't find the container with id 7d76bca280edb22b820aaca8f45b570f0c99be4a719b60352b9f6f0f46c0d69e
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.572921 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.573109 4909 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.573184 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert podName:d4d007c0-4c50-4550-b0ec-829717b7aa37 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:35.573166269 +0000 UTC m=+1097.969850803 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert") pod "infra-operator-controller-manager-57548d458d-7z5zf" (UID: "d4d007c0-4c50-4550-b0ec-829717b7aa37") : secret "infra-operator-webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.676011 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.694067 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.707295 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod906bccad_3e05_4f36_9ecc_57627d2fb226.slice/crio-8d39263d6e47b435b62103a9ddfe2ec9c10a191633a644a7d5c21f69be6a9070 WatchSource:0}: Error finding container 8d39263d6e47b435b62103a9ddfe2ec9c10a191633a644a7d5c21f69be6a9070: Status 404 returned error can't find the container with id 8d39263d6e47b435b62103a9ddfe2ec9c10a191633a644a7d5c21f69be6a9070
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.709259 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.709364 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32bd97d0_184a_4f85_bfdc_1b34688753a5.slice/crio-e66f95ce4e1409e68970d2007353db47621c1048f4354c0c26ad4991a6957e38 WatchSource:0}: Error finding container e66f95ce4e1409e68970d2007353db47621c1048f4354c0c26ad4991a6957e38: Status 404 returned error can't find the container with id e66f95ce4e1409e68970d2007353db47621c1048f4354c0c26ad4991a6957e38
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.714920 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.716776 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podede8d830_fccd_4337_b0ec_48030a263d44.slice/crio-4066b685e8f7d2e489a90f3167b06bb2dcad723b5f4c85bcc677c0b972f84194 WatchSource:0}: Error finding container 4066b685e8f7d2e489a90f3167b06bb2dcad723b5f4c85bcc677c0b972f84194: Status 404 returned error can't find the container with id 4066b685e8f7d2e489a90f3167b06bb2dcad723b5f4c85bcc677c0b972f84194
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.719821 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.778917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.779121 4909 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.779209 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert podName:f7de33ac-fc6f-4c0c-a22a-0d9919ac6212 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:35.77918631 +0000 UTC m=+1098.175870844 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" (UID: "f7de33ac-fc6f-4c0c-a22a-0d9919ac6212") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.809648 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jlztw"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.835527 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.840239 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod108b7264_8189_4234_a458_ba48a0c28123.slice/crio-546689e626e88f430fdbb042b7f77e0f8788ebe61b915173d6ab8e6ad5cefe88 WatchSource:0}: Error finding container 546689e626e88f430fdbb042b7f77e0f8788ebe61b915173d6ab8e6ad5cefe88: Status 404 returned error can't find the container with id 546689e626e88f430fdbb042b7f77e0f8788ebe61b915173d6ab8e6ad5cefe88
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.842882 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9r7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-x28l4_openstack-operators(108b7264-8189-4234-a458-ba48a0c28123): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.843642 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.844106 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc2bc53b_f803_4f9b_943f_e53f132cbb39.slice/crio-017eade1bfbb3a0236bcdfd362f3e542198d9c34db4843eb9baaba14b3822e18 WatchSource:0}: Error finding container 017eade1bfbb3a0236bcdfd362f3e542198d9c34db4843eb9baaba14b3822e18: Status 404 returned error can't find the container with id 017eade1bfbb3a0236bcdfd362f3e542198d9c34db4843eb9baaba14b3822e18
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.846090 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9024361d_e7dd_4f11_b22f_4027e31bd0ae.slice/crio-8e9a509591d8df3240c6ec074a04d6c7b981312c6640e67efb102635d6228d18 WatchSource:0}: Error finding container 8e9a509591d8df3240c6ec074a04d6c7b981312c6640e67efb102635d6228d18: Status 404 returned error can't find the container with id 8e9a509591d8df3240c6ec074a04d6c7b981312c6640e67efb102635d6228d18
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.846605 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9r7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-x28l4_openstack-operators(108b7264-8189-4234-a458-ba48a0c28123): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.847884 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" podUID="108b7264-8189-4234-a458-ba48a0c28123"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.848944 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6n6vn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-xnnf4_openstack-operators(dc2bc53b-f803-4f9b-943f-e53f132cbb39): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.849566 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4"]
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.849783 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod584e895a_63c3_48db_8207_e89bc9396da7.slice/crio-061cd5b8704332a77120fa68eb8322c526cea56b5f134d7d1e9e48842d737937 WatchSource:0}: Error finding container 061cd5b8704332a77120fa68eb8322c526cea56b5f134d7d1e9e48842d737937: Status 404 returned error can't find the container with id 061cd5b8704332a77120fa68eb8322c526cea56b5f134d7d1e9e48842d737937
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.849766 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7whvg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-w8bg4_openstack-operators(584e895a-63c3-48db-8207-e89bc9396da7): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.852459 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6n6vn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-xnnf4_openstack-operators(dc2bc53b-f803-4f9b-943f-e53f132cbb39): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.853387 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7whvg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-w8bg4_openstack-operators(584e895a-63c3-48db-8207-e89bc9396da7): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: W1128 16:28:33.853624 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3127edf8_24b9_4170_b677_c625926881a5.slice/crio-25e4687f340831d73c95345c698506d22431a7d8eeb5103fdcc2ea4336985891 WatchSource:0}: Error finding container 25e4687f340831d73c95345c698506d22431a7d8eeb5103fdcc2ea4336985891: Status 404 returned error can't find the container with id 25e4687f340831d73c95345c698506d22431a7d8eeb5103fdcc2ea4336985891
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.853690 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" podUID="dc2bc53b-f803-4f9b-943f-e53f132cbb39"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.853814 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5d5wt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-hbnt7_openstack-operators(9024361d-e7dd-4f11-b22f-4027e31bd0ae): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.854459 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" podUID="584e895a-63c3-48db-8207-e89bc9396da7"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.854458 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s8h2l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-rrfhv_openstack-operators(626fb166-d7e7-424e-bcde-08e77d0c54b1): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.855400 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv"]
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.855618 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5d5wt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-hbnt7_openstack-operators(9024361d-e7dd-4f11-b22f-4027e31bd0ae): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.855644 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jh4ss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-b8ssv_openstack-operators(3127edf8-24b9-4170-b677-c625926881a5): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.855896 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" podUID="626fb166-d7e7-424e-bcde-08e77d0c54b1"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.857479 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" podUID="9024361d-e7dd-4f11-b22f-4027e31bd0ae"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.858102 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jh4ss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-b8ssv_openstack-operators(3127edf8-24b9-4170-b677-c625926881a5): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.859519 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" podUID="3127edf8-24b9-4170-b677-c625926881a5"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.861449 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.866987 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7"]
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.892951 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" event={"ID":"5e7d8f53-01bb-407c-8897-ceff90567e28","Type":"ContainerStarted","Data":"2b774d3c1b3e7664bbb41cbcdddfec2c207daebf78cd19bfd065c456837d86e5"}
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.894338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" event={"ID":"771c530f-d0ab-412d-a6c6-931999bc878f","Type":"ContainerStarted","Data":"24e5a620f49f7daa20bd6086992e087186ae9920f6ca81d6fc41fb31af6177f8"}
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.896009 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" event={"ID":"dc2bc53b-f803-4f9b-943f-e53f132cbb39","Type":"ContainerStarted","Data":"017eade1bfbb3a0236bcdfd362f3e542198d9c34db4843eb9baaba14b3822e18"}
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.897409 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" event={"ID":"3127edf8-24b9-4170-b677-c625926881a5","Type":"ContainerStarted","Data":"25e4687f340831d73c95345c698506d22431a7d8eeb5103fdcc2ea4336985891"}
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.897999 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" podUID="dc2bc53b-f803-4f9b-943f-e53f132cbb39"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.899808 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" podUID="3127edf8-24b9-4170-b677-c625926881a5"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.900343 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" event={"ID":"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e","Type":"ContainerStarted","Data":"fc150c1c52338bf3337c746d03f1c1931893eefa88635731e92b59b1d1a1c6e3"}
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.903459 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" podUID="108b7264-8189-4234-a458-ba48a0c28123"
Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.906113 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" podUID="9024361d-e7dd-4f11-b22f-4027e31bd0ae"
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914633 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" event={"ID":"108b7264-8189-4234-a458-ba48a0c28123","Type":"ContainerStarted","Data":"546689e626e88f430fdbb042b7f77e0f8788ebe61b915173d6ab8e6ad5cefe88"}
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914715 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" event={"ID":"facf7553-0ba6-43b6-b720-0345f63d5706","Type":"ContainerStarted","Data":"fe8aa38e4491de08813fd351fb3c5304e839f4cb8feaa71ec002a29533dbaf9e"}
Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914730 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" event={"ID":"9024361d-e7dd-4f11-b22f-4027e31bd0ae","Type":"ContainerStarted","Data":"8e9a509591d8df3240c6ec074a04d6c7b981312c6640e67efb102635d6228d18"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914791 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" event={"ID":"906bccad-3e05-4f36-9ecc-57627d2fb226","Type":"ContainerStarted","Data":"8d39263d6e47b435b62103a9ddfe2ec9c10a191633a644a7d5c21f69be6a9070"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914804 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" event={"ID":"5a4d9dc0-67a3-4014-9265-f88dac783bca","Type":"ContainerStarted","Data":"1342e19974cb8a7937b7c820e547eede1cbb0632c2a739d9d084cf0be49244bf"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914834 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" event={"ID":"4081f6af-5e99-4383-b12a-654d6c1419d8","Type":"ContainerStarted","Data":"e16e40bd9fbecf1b22f48198a30c49aa5d3163a77dc780e19b3d87d6fc7454b3"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914848 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" event={"ID":"8b35e013-9a3e-4434-9603-e7fbd95f2dca","Type":"ContainerStarted","Data":"7d76bca280edb22b820aaca8f45b570f0c99be4a719b60352b9f6f0f46c0d69e"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914859 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" event={"ID":"ede8d830-fccd-4337-b0ec-48030a263d44","Type":"ContainerStarted","Data":"4066b685e8f7d2e489a90f3167b06bb2dcad723b5f4c85bcc677c0b972f84194"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.914870 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" event={"ID":"b8299ad0-0617-430d-9b0f-066022a5679f","Type":"ContainerStarted","Data":"2f27fa0749fa6b76863ff4f79b1e66ad72ab64d72b825abbb5cf5c9a0730ec31"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.925503 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" event={"ID":"584e895a-63c3-48db-8207-e89bc9396da7","Type":"ContainerStarted","Data":"061cd5b8704332a77120fa68eb8322c526cea56b5f134d7d1e9e48842d737937"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.926817 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" event={"ID":"5efbc774-20bd-4b16-a9dd-2584462dad47","Type":"ContainerStarted","Data":"204eb14a5234d1efc5bc62213994d8e3b031411b6f02846fb802a3d99d843d9f"} Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.928309 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" 
pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" podUID="584e895a-63c3-48db-8207-e89bc9396da7" Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.929153 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" event={"ID":"32bd97d0-184a-4f85-bfdc-1b34688753a5","Type":"ContainerStarted","Data":"e66f95ce4e1409e68970d2007353db47621c1048f4354c0c26ad4991a6957e38"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.930589 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" event={"ID":"f5f666e9-9715-444d-a4ec-f6b3bb719df6","Type":"ContainerStarted","Data":"2f2b468fba09b89368c56a6513360da8ebcf0509f67d24dee688d0e27bf46db1"} Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.931710 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" event={"ID":"626fb166-d7e7-424e-bcde-08e77d0c54b1","Type":"ContainerStarted","Data":"f8fbaba4970ecb8e5ebfaa9636b9201005312cdf3b96f43de0dee38f2244060c"} Nov 28 16:28:33 crc kubenswrapper[4909]: E1128 16:28:33.933292 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" podUID="626fb166-d7e7-424e-bcde-08e77d0c54b1" Nov 28 16:28:33 crc kubenswrapper[4909]: I1128 16:28:33.933440 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" event={"ID":"735f57d0-1df6-4773-9e9f-2f2745d307d0","Type":"ContainerStarted","Data":"9de5bab495c5830620de7d99ede5fd447a052a4270cc878b6f7bbb49d383e9b4"} Nov 28 16:28:34 crc kubenswrapper[4909]: I1128 16:28:34.189362 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:34 crc kubenswrapper[4909]: I1128 16:28:34.189484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.189523 4909 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.189599 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:36.189579739 +0000 UTC m=+1098.586264263 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "metrics-server-cert" not found Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.189751 4909 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.189819 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:36.189800995 +0000 UTC m=+1098.586485579 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "webhook-server-cert" not found Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.941139 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" podUID="9024361d-e7dd-4f11-b22f-4027e31bd0ae" Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.941304 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" podUID="626fb166-d7e7-424e-bcde-08e77d0c54b1" Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.941429 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" podUID="3127edf8-24b9-4170-b677-c625926881a5" Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.942357 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" podUID="dc2bc53b-f803-4f9b-943f-e53f132cbb39" Nov 
28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.942428 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" podUID="584e895a-63c3-48db-8207-e89bc9396da7" Nov 28 16:28:34 crc kubenswrapper[4909]: E1128 16:28:34.942866 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" podUID="108b7264-8189-4234-a458-ba48a0c28123" Nov 28 16:28:35 crc kubenswrapper[4909]: I1128 16:28:35.632166 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:35 crc kubenswrapper[4909]: E1128 16:28:35.632361 4909 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:35 crc kubenswrapper[4909]: E1128 16:28:35.632433 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert podName:d4d007c0-4c50-4550-b0ec-829717b7aa37 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:39.632414368 +0000 UTC m=+1102.029098892 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert") pod "infra-operator-controller-manager-57548d458d-7z5zf" (UID: "d4d007c0-4c50-4550-b0ec-829717b7aa37") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:35 crc kubenswrapper[4909]: I1128 16:28:35.834638 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:35 crc kubenswrapper[4909]: E1128 16:28:35.834792 4909 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:28:35 crc kubenswrapper[4909]: E1128 16:28:35.834874 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert podName:f7de33ac-fc6f-4c0c-a22a-0d9919ac6212 nodeName:}" failed. 
No retries permitted until 2025-11-28 16:28:39.834856414 +0000 UTC m=+1102.231540938 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" (UID: "f7de33ac-fc6f-4c0c-a22a-0d9919ac6212") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:28:35 crc kubenswrapper[4909]: E1128 16:28:35.949502 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" podUID="9024361d-e7dd-4f11-b22f-4027e31bd0ae" Nov 28 16:28:36 crc kubenswrapper[4909]: I1128 16:28:36.239859 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:36 crc kubenswrapper[4909]: I1128 16:28:36.240042 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:36 crc kubenswrapper[4909]: E1128 16:28:36.240105 4909 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:28:36 crc kubenswrapper[4909]: E1128 16:28:36.240188 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:40.240167767 +0000 UTC m=+1102.636852291 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "metrics-server-cert" not found Nov 28 16:28:36 crc kubenswrapper[4909]: E1128 16:28:36.240234 4909 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:28:36 crc kubenswrapper[4909]: E1128 16:28:36.240308 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:40.24028894 +0000 UTC m=+1102.636973524 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "webhook-server-cert" not found Nov 28 16:28:39 crc kubenswrapper[4909]: I1128 16:28:39.684327 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:39 crc kubenswrapper[4909]: E1128 16:28:39.684526 4909 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:39 crc kubenswrapper[4909]: E1128 16:28:39.684834 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert podName:d4d007c0-4c50-4550-b0ec-829717b7aa37 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:47.684784021 +0000 UTC m=+1110.081468545 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert") pod "infra-operator-controller-manager-57548d458d-7z5zf" (UID: "d4d007c0-4c50-4550-b0ec-829717b7aa37") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:28:39 crc kubenswrapper[4909]: I1128 16:28:39.887309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:39 crc kubenswrapper[4909]: E1128 16:28:39.887401 4909 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:28:39 crc kubenswrapper[4909]: E1128 16:28:39.887464 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert podName:f7de33ac-fc6f-4c0c-a22a-0d9919ac6212 nodeName:}" failed. No retries permitted until 2025-11-28 16:28:47.887449863 +0000 UTC m=+1110.284134387 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" (UID: "f7de33ac-fc6f-4c0c-a22a-0d9919ac6212") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:28:40 crc kubenswrapper[4909]: I1128 16:28:40.296948 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:40 crc kubenswrapper[4909]: E1128 16:28:40.297087 4909 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:28:40 crc kubenswrapper[4909]: E1128 16:28:40.297162 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:48.297142163 +0000 UTC m=+1110.693826747 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "webhook-server-cert" not found Nov 28 16:28:40 crc kubenswrapper[4909]: I1128 16:28:40.297208 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:40 crc kubenswrapper[4909]: E1128 16:28:40.297438 4909 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:28:40 crc kubenswrapper[4909]: E1128 16:28:40.297532 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs podName:4ed1d030-fe78-4211-a585-46c8ae4f419d nodeName:}" failed. No retries permitted until 2025-11-28 16:28:48.297507622 +0000 UTC m=+1110.694192176 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-bqs6f" (UID: "4ed1d030-fe78-4211-a585-46c8ae4f419d") : secret "metrics-server-cert" not found Nov 28 16:28:47 crc kubenswrapper[4909]: E1128 16:28:47.403904 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557" Nov 28 16:28:47 crc kubenswrapper[4909]: E1128 16:28:47.404469 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6x886,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-sqpxn_openstack-operators(ede8d830-fccd-4337-b0ec-48030a263d44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:28:47 crc kubenswrapper[4909]: I1128 16:28:47.704363 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:47 crc kubenswrapper[4909]: I1128 16:28:47.724374 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4d007c0-4c50-4550-b0ec-829717b7aa37-cert\") pod \"infra-operator-controller-manager-57548d458d-7z5zf\" (UID: \"d4d007c0-4c50-4550-b0ec-829717b7aa37\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:47 crc kubenswrapper[4909]: I1128 16:28:47.906411 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:47 crc kubenswrapper[4909]: I1128 16:28:47.917043 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f7de33ac-fc6f-4c0c-a22a-0d9919ac6212-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw\" (UID: \"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:47 crc kubenswrapper[4909]: I1128 16:28:47.966539 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:28:48 crc kubenswrapper[4909]: E1128 16:28:48.011520 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3" Nov 28 16:28:48 crc kubenswrapper[4909]: E1128 16:28:48.011670 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rk6p9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-546d4bdf48-wsrnd_openstack-operators(f5f666e9-9715-444d-a4ec-f6b3bb719df6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.027695 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.313362 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.314898 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.320527 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.330755 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ed1d030-fe78-4211-a585-46c8ae4f419d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-bqs6f\" (UID: \"4ed1d030-fe78-4211-a585-46c8ae4f419d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.533555 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf"] Nov 28 16:28:48 crc kubenswrapper[4909]: I1128 16:28:48.538250 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:49 crc kubenswrapper[4909]: W1128 16:28:49.065049 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4d007c0_4c50_4550_b0ec_829717b7aa37.slice/crio-7c37960c0742f36b915f57068e2ca95723fe33877634911cf3d65143b80c7956 WatchSource:0}: Error finding container 7c37960c0742f36b915f57068e2ca95723fe33877634911cf3d65143b80c7956: Status 404 returned error can't find the container with id 7c37960c0742f36b915f57068e2ca95723fe33877634911cf3d65143b80c7956 Nov 28 16:28:49 crc kubenswrapper[4909]: I1128 16:28:49.067741 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" event={"ID":"b8299ad0-0617-430d-9b0f-066022a5679f","Type":"ContainerStarted","Data":"4435048c454b8e499549ea2d320070911010f6169d264483e7abcb370c765154"} Nov 28 16:28:49 crc kubenswrapper[4909]: I1128 16:28:49.069691 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" event={"ID":"5a4d9dc0-67a3-4014-9265-f88dac783bca","Type":"ContainerStarted","Data":"c30da5ea09c09f6a29528efaba3f2fe9816aad55cd68310a602cb512a66fcb76"} Nov 28 16:28:49 crc kubenswrapper[4909]: I1128 16:28:49.072025 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" event={"ID":"8b35e013-9a3e-4434-9603-e7fbd95f2dca","Type":"ContainerStarted","Data":"31b878f409e09af3ccc10f0a3ad02fffef4efe806730a6df6b0255efcc01daf0"} Nov 28 16:28:49 crc kubenswrapper[4909]: I1128 16:28:49.564390 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw"] Nov 28 16:28:49 crc kubenswrapper[4909]: I1128 16:28:49.595960 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f"] Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 16:28:50.119765 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" event={"ID":"735f57d0-1df6-4773-9e9f-2f2745d307d0","Type":"ContainerStarted","Data":"18cb33fda2ae6ae86786b1184224cb9c71ba41e88eed3d858d5871ad7c6226be"} Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 16:28:50.127346 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" event={"ID":"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e","Type":"ContainerStarted","Data":"1e63691b2086a708a56339a8b002ae0385266732037868f8e1704f3edb0ce995"} Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 16:28:50.129351 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" event={"ID":"facf7553-0ba6-43b6-b720-0345f63d5706","Type":"ContainerStarted","Data":"bd5d0d6ae95bdbdfd2c450b289d67222c2d30bc9f6c1386e87a81d995970ef6c"} Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 16:28:50.132639 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" event={"ID":"5e7d8f53-01bb-407c-8897-ceff90567e28","Type":"ContainerStarted","Data":"9b65ec8f9510bfd5fcdf5272ef38092f5647348d5232a15fcf0e183e50722916"} Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 
16:28:50.134236 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" event={"ID":"d4d007c0-4c50-4550-b0ec-829717b7aa37","Type":"ContainerStarted","Data":"7c37960c0742f36b915f57068e2ca95723fe33877634911cf3d65143b80c7956"} Nov 28 16:28:50 crc kubenswrapper[4909]: I1128 16:28:50.136096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" event={"ID":"771c530f-d0ab-412d-a6c6-931999bc878f","Type":"ContainerStarted","Data":"b739eab9d69f60907a8fa11eccbb87cd77548925449609c4172a6870dddeaa2b"} Nov 28 16:28:50 crc kubenswrapper[4909]: W1128 16:28:50.704745 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ed1d030_fe78_4211_a585_46c8ae4f419d.slice/crio-7e101046d26a96a15d79811856527b711023489640e541bcc03ba6757536bf51 WatchSource:0}: Error finding container 7e101046d26a96a15d79811856527b711023489640e541bcc03ba6757536bf51: Status 404 returned error can't find the container with id 7e101046d26a96a15d79811856527b711023489640e541bcc03ba6757536bf51 Nov 28 16:28:51 crc kubenswrapper[4909]: I1128 16:28:51.145724 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" event={"ID":"32bd97d0-184a-4f85-bfdc-1b34688753a5","Type":"ContainerStarted","Data":"6615694ca73f11cb104afc250cae5876a4d6d44679e12d426cadfab626d5a514"} Nov 28 16:28:51 crc kubenswrapper[4909]: I1128 16:28:51.146672 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" event={"ID":"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212","Type":"ContainerStarted","Data":"65037fe4ea90bf7baeb54684550362063055f757d401bdf9d098e0235cbebb0c"} Nov 28 16:28:51 crc kubenswrapper[4909]: I1128 16:28:51.148214 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" event={"ID":"4ed1d030-fe78-4211-a585-46c8ae4f419d","Type":"ContainerStarted","Data":"7e101046d26a96a15d79811856527b711023489640e541bcc03ba6757536bf51"} Nov 28 16:28:51 crc kubenswrapper[4909]: I1128 16:28:51.149198 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" event={"ID":"5efbc774-20bd-4b16-a9dd-2584462dad47","Type":"ContainerStarted","Data":"d4db9fade624f1d69d0c519e9046b51bf0204fcc317faa4ed5bbc2373f7fb60c"} Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.169247 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" event={"ID":"dc2bc53b-f803-4f9b-943f-e53f132cbb39","Type":"ContainerStarted","Data":"0523368fecad873de4f34f67f9f2d6f8d92c51178a533776596df8337ddb067b"} Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.171074 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" event={"ID":"906bccad-3e05-4f36-9ecc-57627d2fb226","Type":"ContainerStarted","Data":"5f2fc57cb76e7f2e8adaccceda0c499fedd58697f8d1698a2c4c56d8601cee34"} Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.172278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" 
event={"ID":"4081f6af-5e99-4383-b12a-654d6c1419d8","Type":"ContainerStarted","Data":"f4a1dacaff44f1d848294c1b0c4d158657d9cb7499f44924c843888c2448daf6"} Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.181510 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" event={"ID":"4ed1d030-fe78-4211-a585-46c8ae4f419d","Type":"ContainerStarted","Data":"ee96bf5b029f6806c32188415c7d79a68f0001f49de67dedf87a9bd7f805c1ee"} Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.182432 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:28:53 crc kubenswrapper[4909]: I1128 16:28:53.216211 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" podStartSLOduration=21.216191603 podStartE2EDuration="21.216191603s" podCreationTimestamp="2025-11-28 16:28:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:28:53.213963203 +0000 UTC m=+1115.610647737" watchObservedRunningTime="2025-11-28 16:28:53.216191603 +0000 UTC m=+1115.612876127" Nov 28 16:28:54 crc kubenswrapper[4909]: I1128 16:28:54.192467 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" event={"ID":"584e895a-63c3-48db-8207-e89bc9396da7","Type":"ContainerStarted","Data":"d7048a6b885c6ad70f749dc1d1a8a88ed073ab427b0a8af1765407c6909866d4"} Nov 28 16:28:56 crc kubenswrapper[4909]: I1128 16:28:56.212587 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" event={"ID":"3127edf8-24b9-4170-b677-c625926881a5","Type":"ContainerStarted","Data":"dabaf6240004ce89ebf3764302dfbf12e0fa23b05465eafd7b4e5a734772900c"} Nov 28 16:28:58 crc kubenswrapper[4909]: I1128 16:28:58.544632 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-bqs6f" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.486995 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage2830081538/4\": happened during read: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.487709 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tgnxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-78b4bc895b-52zmw_openstack-operators(5fbd4b3d-b059-4780-b1f1-f04e00a9a90e): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage2830081538/4\": happened during read: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.489233 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage2830081538/4\\\": happened during read: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" podUID="5fbd4b3d-b059-4780-b1f1-f04e00a9a90e" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.753061 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.753625 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l5zmv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
horizon-operator-controller-manager-68c6d99b8f-zjfzk_openstack-operators(906bccad-3e05-4f36-9ecc-57627d2fb226): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.755032 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" podUID="906bccad-3e05-4f36-9ecc-57627d2fb226" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.760508 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.760809 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rsw59,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-jlztw_openstack-operators(5a4d9dc0-67a3-4014-9265-f88dac783bca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.763000 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" podUID="5a4d9dc0-67a3-4014-9265-f88dac783bca" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.799227 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.799429 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cm7xf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-6546668bfd-t5cvm_openstack-operators(735f57d0-1df6-4773-9e9f-2f2745d307d0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.800729 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" podUID="735f57d0-1df6-4773-9e9f-2f2745d307d0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.933984 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4157605173/2\": happened during read: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.934213 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jh4ss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-b8ssv_openstack-operators(3127edf8-24b9-4170-b677-c625926881a5): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4157605173/2\": happened during read: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.935481 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage4157605173/2\\\": happened during read: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" podUID="3127edf8-24b9-4170-b677-c625926881a5" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.950093 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.950275 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dnxmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
mariadb-operator-controller-manager-56bbcc9d85-5r95f_openstack-operators(b8299ad0-0617-430d-9b0f-066022a5679f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:13 crc kubenswrapper[4909]: E1128 16:29:13.951450 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" podUID="b8299ad0-0617-430d-9b0f-066022a5679f" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.156773 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.157112 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r6t6s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-6c548fd776-zqxvl_openstack-operators(8b35e013-9a3e-4434-9603-e7fbd95f2dca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.158362 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" podUID="8b35e013-9a3e-4434-9603-e7fbd95f2dca" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.392039 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" event={"ID":"d4d007c0-4c50-4550-b0ec-829717b7aa37","Type":"ContainerStarted","Data":"3f344a46c2bf222b822c825371023bfbe4d9e44501cfd1364bf64d00180fa26c"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.392105 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" event={"ID":"d4d007c0-4c50-4550-b0ec-829717b7aa37","Type":"ContainerStarted","Data":"c7ea8188f843e202e3214a457cf7c09028e3b42460fe7dedfd5f6efce1a0494f"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.392144 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.396924 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" event={"ID":"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212","Type":"ContainerStarted","Data":"47041e5d173a8564e33492a4cb39288e4e9ec70a2dfc57384b14f81fd8c79073"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.398145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" event={"ID":"626fb166-d7e7-424e-bcde-08e77d0c54b1","Type":"ContainerStarted","Data":"f175d594f2f6d5e8ef5e9c7d2262c8200cd2eb669405aa86c18ee05d215bcb7c"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.410788 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" event={"ID":"108b7264-8189-4234-a458-ba48a0c28123","Type":"ContainerStarted","Data":"e87a6c1e9c01e4dc0daf91dd253259edccd944f91d2157b492c262266c95f9c5"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.410842 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" event={"ID":"108b7264-8189-4234-a458-ba48a0c28123","Type":"ContainerStarted","Data":"4d7c8e9cc342c4544af530aa257a6990113b60b06236c550adfcde8cb3a70935"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.411552 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.422681 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" podStartSLOduration=34.995271851 podStartE2EDuration="43.422639266s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:49.110276383 +0000 UTC m=+1111.506960907" lastFinishedPulling="2025-11-28 16:28:57.537643798 +0000 UTC m=+1119.934328322" observedRunningTime="2025-11-28 16:29:14.41789514 +0000 UTC m=+1136.814579684" watchObservedRunningTime="2025-11-28 16:29:14.422639266 +0000 UTC m=+1136.819323790" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.426776 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" event={"ID":"9024361d-e7dd-4f11-b22f-4027e31bd0ae","Type":"ContainerStarted","Data":"399018970d36c201f477d66e6696659e2052dc537ab265bc6d2d9fb47b189aef"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.426836 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" event={"ID":"9024361d-e7dd-4f11-b22f-4027e31bd0ae","Type":"ContainerStarted","Data":"717df6bfea18cf5eb4d6af94b7a3709305efa577ff59a3113043b475fe52f26a"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.427305 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.450198 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" event={"ID":"5efbc774-20bd-4b16-a9dd-2584462dad47","Type":"ContainerStarted","Data":"066e74e58485836820cbee0156d5bb8bbca4361f796452550492d8e36232de6b"} Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.450248 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.452386 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.452427 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.452439 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.452448 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.452456 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.453032 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.453053 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.453812 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rrfhv" podStartSLOduration=2.547460559 podStartE2EDuration="42.453788707s" podCreationTimestamp="2025-11-28 16:28:32 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.854181209 +0000 UTC m=+1096.250865733" lastFinishedPulling="2025-11-28 16:29:13.760509357 +0000 UTC m=+1136.157193881" observedRunningTime="2025-11-28 16:29:14.446927784 +0000 UTC m=+1136.843612318" watchObservedRunningTime="2025-11-28 16:29:14.453788707 +0000 UTC m=+1136.850473231" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.462098 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.462453 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.462525 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.463966 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.464027 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.464059 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.464084 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.464110 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.485314 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" podStartSLOduration=17.49461362 podStartE2EDuration="43.485285396s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.842623551 +0000 UTC m=+1096.239308075" lastFinishedPulling="2025-11-28 16:28:59.833295327 +0000 UTC m=+1122.229979851" observedRunningTime="2025-11-28 16:29:14.473879572 +0000 UTC m=+1136.870564096" watchObservedRunningTime="2025-11-28 16:29:14.485285396 +0000 UTC m=+1136.881969990" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.519198 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.519345 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kdszc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7d9dfd778-v8ggq_openstack-operators(facf7553-0ba6-43b6-b720-0345f63d5706): ErrImagePull: rpc error: code = Canceled 
desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.520948 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" podUID="facf7553-0ba6-43b6-b720-0345f63d5706" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.616428 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" podStartSLOduration=17.636096722 podStartE2EDuration="43.616396041s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.853643245 +0000 UTC m=+1096.250327769" lastFinishedPulling="2025-11-28 16:28:59.833942564 +0000 UTC m=+1122.230627088" observedRunningTime="2025-11-28 16:29:14.600319922 +0000 UTC m=+1136.997004466" watchObservedRunningTime="2025-11-28 16:29:14.616396041 +0000 UTC m=+1137.013080565" Nov 28 16:29:14 crc kubenswrapper[4909]: I1128 16:29:14.730678 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-k5bt2" podStartSLOduration=3.03619144 podStartE2EDuration="43.730642736s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.15077857 +0000 UTC m=+1095.547463094" lastFinishedPulling="2025-11-28 16:29:13.845229866 +0000 UTC m=+1136.241914390" observedRunningTime="2025-11-28 16:29:14.70301157 +0000 UTC m=+1137.099696094" watchObservedRunningTime="2025-11-28 16:29:14.730642736 +0000 UTC m=+1137.127327260" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.739419 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.739547 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6x886,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-sqpxn_openstack-operators(ede8d830-fccd-4337-b0ec-48030a263d44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.743696 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" podUID="ede8d830-fccd-4337-b0ec-48030a263d44" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.770283 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4268914787/3\": happened during read: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.770436 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6n6vn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
nova-operator-controller-manager-697bc559fc-xnnf4_openstack-operators(dc2bc53b-f803-4f9b-943f-e53f132cbb39): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4268914787/3\": happened during read: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.771856 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage4268914787/3\\\": happened during read: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" podUID="dc2bc53b-f803-4f9b-943f-e53f132cbb39" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.905461 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.905627 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d6b5k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-5mfq8_openstack-operators(32bd97d0-184a-4f85-bfdc-1b34688753a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.912846 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" podUID="32bd97d0-184a-4f85-bfdc-1b34688753a5" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.938811 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.938979 4909 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gjcrx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-9vnqw_openstack-operators(771c530f-d0ab-412d-a6c6-931999bc878f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4909]: E1128 16:29:14.940527 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" podUID="771c530f-d0ab-412d-a6c6-931999bc878f" Nov 28 16:29:15 crc kubenswrapper[4909]: E1128 16:29:15.255478 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" podUID="f5f666e9-9715-444d-a4ec-f6b3bb719df6" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.456120 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" event={"ID":"735f57d0-1df6-4773-9e9f-2f2745d307d0","Type":"ContainerStarted","Data":"611c68ae0b1ffcd8b3eec6aa18b342551f01dc00601dfa4dc3686c2b8c727b64"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.458781 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" event={"ID":"8b35e013-9a3e-4434-9603-e7fbd95f2dca","Type":"ContainerStarted","Data":"34a7a43ffeb38d701a97268f43cfa113964b473555aa21ceefcef6ad204a6613"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.460843 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" event={"ID":"584e895a-63c3-48db-8207-e89bc9396da7","Type":"ContainerStarted","Data":"9e975508b57f0d65d1fa698263daa47b9c9ea5806bffd179e6e0687bf3a1925c"} Nov 28 16:29:15 crc 
kubenswrapper[4909]: I1128 16:29:15.462828 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.463592 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.464579 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" event={"ID":"f5f666e9-9715-444d-a4ec-f6b3bb719df6","Type":"ContainerStarted","Data":"090aba792cae8a0556a3cad2c8e716cd49c0dd65b2d3326b575ee61b56b27005"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.467239 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" event={"ID":"3127edf8-24b9-4170-b677-c625926881a5","Type":"ContainerStarted","Data":"b7c80b629aaea316dbb5eea6adffb2ef002c653ba1ca543ed99082efefaf8ba7"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.469199 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" event={"ID":"5e7d8f53-01bb-407c-8897-ceff90567e28","Type":"ContainerStarted","Data":"473841cd99fb0f44b3fb38818eb24f02b0af97542b6c34902ce33a7193bbbede"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.470552 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.471312 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.471927 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" event={"ID":"b8299ad0-0617-430d-9b0f-066022a5679f","Type":"ContainerStarted","Data":"9f9d934160d81f5eb55a52188abbe403e593166692c344ceb9c1a48373ff3007"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.478755 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" event={"ID":"f7de33ac-fc6f-4c0c-a22a-0d9919ac6212","Type":"ContainerStarted","Data":"b8d490d873ea695996cc8f7500c310fcad5ed859dfed3693ef4421b660420c5c"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.478787 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-t5cvm" podStartSLOduration=29.550561343 podStartE2EDuration="44.478768477s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.046784288 +0000 UTC m=+1095.443468812" lastFinishedPulling="2025-11-28 16:28:47.974991422 +0000 UTC m=+1110.371675946" observedRunningTime="2025-11-28 16:29:15.476220289 +0000 UTC m=+1137.872904823" watchObservedRunningTime="2025-11-28 16:29:15.478768477 +0000 UTC m=+1137.875453001" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.479353 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.481441 4909 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" event={"ID":"906bccad-3e05-4f36-9ecc-57627d2fb226","Type":"ContainerStarted","Data":"26772071e841a26aa8e5065b5335695e7d470ee621e3411137cf1f1660764ae1"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.483064 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" event={"ID":"5a4d9dc0-67a3-4014-9265-f88dac783bca","Type":"ContainerStarted","Data":"5eca2834309cba768943c82051c1f775b077f5bcf7c4a1812f2a6b368e7b1ffd"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.484623 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" event={"ID":"4081f6af-5e99-4383-b12a-654d6c1419d8","Type":"ContainerStarted","Data":"381d88378a794082f570fa4be795d4545303418114b3d949a09c3db90e08dc91"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.486214 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.493070 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.494524 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" event={"ID":"5fbd4b3d-b059-4780-b1f1-f04e00a9a90e","Type":"ContainerStarted","Data":"ac5b121cd851a7f964082b5b9803e19058e3fe126f42bc4842d65f69ea90a050"} Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.500258 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.500296 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.500311 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.500323 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.501269 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.501597 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.501875 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.508968 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-cdlxz" podStartSLOduration=2.683154491 podStartE2EDuration="44.508945702s" podCreationTimestamp="2025-11-28 16:28:31 +0000 
UTC" firstStartedPulling="2025-11-28 16:28:33.02020957 +0000 UTC m=+1095.416894094" lastFinishedPulling="2025-11-28 16:29:14.846000781 +0000 UTC m=+1137.242685305" observedRunningTime="2025-11-28 16:29:15.501709989 +0000 UTC m=+1137.898394523" watchObservedRunningTime="2025-11-28 16:29:15.508945702 +0000 UTC m=+1137.905630226" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.516383 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.546719 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqxvl" podStartSLOduration=30.062780347 podStartE2EDuration="44.546697518s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.49103633 +0000 UTC m=+1095.887720854" lastFinishedPulling="2025-11-28 16:28:47.974953501 +0000 UTC m=+1110.371638025" observedRunningTime="2025-11-28 16:29:15.540858462 +0000 UTC m=+1137.937542986" watchObservedRunningTime="2025-11-28 16:29:15.546697518 +0000 UTC m=+1137.943382042" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.610981 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-5r95f" podStartSLOduration=30.332772723 podStartE2EDuration="44.610960541s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.697735319 +0000 UTC m=+1096.094419843" lastFinishedPulling="2025-11-28 16:28:47.975923137 +0000 UTC m=+1110.372607661" observedRunningTime="2025-11-28 16:29:15.596074894 +0000 UTC m=+1137.992759428" watchObservedRunningTime="2025-11-28 16:29:15.610960541 +0000 UTC m=+1138.007645075" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.630619 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-w8bg4" podStartSLOduration=3.033667898 podStartE2EDuration="43.630599274s" podCreationTimestamp="2025-11-28 16:28:32 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.849674749 +0000 UTC m=+1096.246359273" lastFinishedPulling="2025-11-28 16:29:14.446606125 +0000 UTC m=+1136.843290649" observedRunningTime="2025-11-28 16:29:15.628007655 +0000 UTC m=+1138.024692189" watchObservedRunningTime="2025-11-28 16:29:15.630599274 +0000 UTC m=+1138.027283798" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.694713 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-b8ssv" podStartSLOduration=25.36070033 podStartE2EDuration="43.694693743s" podCreationTimestamp="2025-11-28 16:28:32 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.855522675 +0000 UTC m=+1096.252207199" lastFinishedPulling="2025-11-28 16:28:52.189516088 +0000 UTC m=+1114.586200612" observedRunningTime="2025-11-28 16:29:15.668178076 +0000 UTC m=+1138.064862610" watchObservedRunningTime="2025-11-28 16:29:15.694693743 +0000 UTC m=+1138.091378277" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.719612 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-ggchx" podStartSLOduration=3.852584611 podStartE2EDuration="44.719591676s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 
16:28:33.466760573 +0000 UTC m=+1095.863445097" lastFinishedPulling="2025-11-28 16:29:14.333767638 +0000 UTC m=+1136.730452162" observedRunningTime="2025-11-28 16:29:15.718356153 +0000 UTC m=+1138.115040687" watchObservedRunningTime="2025-11-28 16:29:15.719591676 +0000 UTC m=+1138.116276200" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.796760 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jlztw" podStartSLOduration=30.642084827 podStartE2EDuration="44.796746312s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.820290856 +0000 UTC m=+1096.216975390" lastFinishedPulling="2025-11-28 16:28:47.974952351 +0000 UTC m=+1110.371636875" observedRunningTime="2025-11-28 16:29:15.795118328 +0000 UTC m=+1138.191802862" watchObservedRunningTime="2025-11-28 16:29:15.796746312 +0000 UTC m=+1138.193430836" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.848214 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" podStartSLOduration=35.82662989 podStartE2EDuration="44.848195863s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:50.811690183 +0000 UTC m=+1113.208374707" lastFinishedPulling="2025-11-28 16:28:59.833256156 +0000 UTC m=+1122.229940680" observedRunningTime="2025-11-28 16:29:15.832807483 +0000 UTC m=+1138.229492007" watchObservedRunningTime="2025-11-28 16:29:15.848195863 +0000 UTC m=+1138.244880387" Nov 28 16:29:15 crc kubenswrapper[4909]: I1128 16:29:15.865921 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-zjfzk" podStartSLOduration=30.529352482 podStartE2EDuration="44.865898715s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.709991106 +0000 UTC m=+1096.106675630" lastFinishedPulling="2025-11-28 16:28:48.046537339 +0000 UTC m=+1110.443221863" observedRunningTime="2025-11-28 16:29:15.852317693 +0000 UTC m=+1138.249002217" watchObservedRunningTime="2025-11-28 16:29:15.865898715 +0000 UTC m=+1138.262583239" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.018401 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-52zmw" podStartSLOduration=30.529801503 podStartE2EDuration="45.018379029s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.486373295 +0000 UTC m=+1095.883057819" lastFinishedPulling="2025-11-28 16:28:47.974950821 +0000 UTC m=+1110.371635345" observedRunningTime="2025-11-28 16:29:16.00714905 +0000 UTC m=+1138.403833584" watchObservedRunningTime="2025-11-28 16:29:16.018379029 +0000 UTC m=+1138.415063563" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.509334 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" event={"ID":"771c530f-d0ab-412d-a6c6-931999bc878f","Type":"ContainerStarted","Data":"3cd911c455a70f48699b77c1ed3905ef8bbecf3b875d3bd62570b536af3cc3e6"} Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.513846 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" 
event={"ID":"dc2bc53b-f803-4f9b-943f-e53f132cbb39","Type":"ContainerStarted","Data":"21d90af200cea58f8d8d36a2290c420b66d5493ce60f7945609c7eb75cf1565d"} Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.517414 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" event={"ID":"facf7553-0ba6-43b6-b720-0345f63d5706","Type":"ContainerStarted","Data":"7818aea75ec555efc418fb5fe85308ab8e724d30cd85fc96dc34e704398c7d6c"} Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.523731 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" event={"ID":"32bd97d0-184a-4f85-bfdc-1b34688753a5","Type":"ContainerStarted","Data":"9e650d63c0d8f7dca9d301441d3c92dead669f6c40888139660a4756a83c86b7"} Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.527981 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" event={"ID":"f5f666e9-9715-444d-a4ec-f6b3bb719df6","Type":"ContainerStarted","Data":"aed8f19601e3ca40ea80ece06999c59b4c537f04c15b59453feaaf1b862aa210"} Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.542137 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" podStartSLOduration=31.262232885 podStartE2EDuration="45.542118729s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.717928957 +0000 UTC m=+1096.114613481" lastFinishedPulling="2025-11-28 16:28:47.997814801 +0000 UTC m=+1110.394499325" observedRunningTime="2025-11-28 16:29:16.538880833 +0000 UTC m=+1138.935565357" watchObservedRunningTime="2025-11-28 16:29:16.542118729 +0000 UTC m=+1138.938803253" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.604808 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-5mfq8" podStartSLOduration=31.313972355 podStartE2EDuration="45.60478952s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.712589955 +0000 UTC m=+1096.109274479" lastFinishedPulling="2025-11-28 16:28:48.00340712 +0000 UTC m=+1110.400091644" observedRunningTime="2025-11-28 16:29:16.601379389 +0000 UTC m=+1138.998063913" watchObservedRunningTime="2025-11-28 16:29:16.60478952 +0000 UTC m=+1139.001474044" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.623570 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-xnnf4" podStartSLOduration=27.301097665 podStartE2EDuration="45.6235429s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.848808466 +0000 UTC m=+1096.245492990" lastFinishedPulling="2025-11-28 16:28:52.171253701 +0000 UTC m=+1114.567938225" observedRunningTime="2025-11-28 16:29:16.57815695 +0000 UTC m=+1138.974841474" watchObservedRunningTime="2025-11-28 16:29:16.6235429 +0000 UTC m=+1139.020227424" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.646647 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-v8ggq" podStartSLOduration=30.664528845 podStartE2EDuration="45.646624535s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 
16:28:33.020801176 +0000 UTC m=+1095.417485700" lastFinishedPulling="2025-11-28 16:28:48.002896866 +0000 UTC m=+1110.399581390" observedRunningTime="2025-11-28 16:29:16.626705094 +0000 UTC m=+1139.023389648" watchObservedRunningTime="2025-11-28 16:29:16.646624535 +0000 UTC m=+1139.043309059" Nov 28 16:29:16 crc kubenswrapper[4909]: I1128 16:29:16.658130 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" podStartSLOduration=2.750704721 podStartE2EDuration="45.658107841s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.256467557 +0000 UTC m=+1095.653152081" lastFinishedPulling="2025-11-28 16:29:16.163870667 +0000 UTC m=+1138.560555201" observedRunningTime="2025-11-28 16:29:16.650320474 +0000 UTC m=+1139.047005008" watchObservedRunningTime="2025-11-28 16:29:16.658107841 +0000 UTC m=+1139.054792365" Nov 28 16:29:17 crc kubenswrapper[4909]: I1128 16:29:17.538258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" event={"ID":"ede8d830-fccd-4337-b0ec-48030a263d44","Type":"ContainerStarted","Data":"43e6f86794ed0784b95a2db71561f9130f3f0645d1bf48d0a2df746030a37368"} Nov 28 16:29:17 crc kubenswrapper[4909]: I1128 16:29:17.538623 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:29:17 crc kubenswrapper[4909]: I1128 16:29:17.538681 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" event={"ID":"ede8d830-fccd-4337-b0ec-48030a263d44","Type":"ContainerStarted","Data":"13c1a6f5c9b778ac08e9750d0ac1545db2e6e3bf321ef6cee6077d1977c59c12"} Nov 28 16:29:17 crc kubenswrapper[4909]: I1128 16:29:17.557738 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" podStartSLOduration=3.914007589 podStartE2EDuration="46.55770491s" podCreationTimestamp="2025-11-28 16:28:31 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.720170847 +0000 UTC m=+1096.116855371" lastFinishedPulling="2025-11-28 16:29:16.363868178 +0000 UTC m=+1138.760552692" observedRunningTime="2025-11-28 16:29:17.556497507 +0000 UTC m=+1139.953182041" watchObservedRunningTime="2025-11-28 16:29:17.55770491 +0000 UTC m=+1139.954389474" Nov 28 16:29:18 crc kubenswrapper[4909]: I1128 16:29:18.548135 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:29:22 crc kubenswrapper[4909]: I1128 16:29:22.195453 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-wsrnd" Nov 28 16:29:22 crc kubenswrapper[4909]: I1128 16:29:22.339057 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-sqpxn" Nov 28 16:29:22 crc kubenswrapper[4909]: I1128 16:29:22.421070 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x28l4" Nov 28 16:29:22 crc kubenswrapper[4909]: I1128 16:29:22.447297 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-hbnt7" Nov 28 16:29:27 crc kubenswrapper[4909]: I1128 16:29:27.972997 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7z5zf" Nov 28 16:29:28 crc kubenswrapper[4909]: I1128 16:29:28.038340 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.004502 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.005991 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.008420 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.008638 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-np2v8" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.009761 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.010373 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.020940 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.106272 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.106342 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfvlm\" (UniqueName: \"kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.126121 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.127232 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.130431 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.161145 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.207960 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrh7x\" (UniqueName: \"kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.208091 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.208128 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.208149 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfvlm\" (UniqueName: \"kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.208292 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.208879 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.226203 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfvlm\" (UniqueName: \"kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm\") pod \"dnsmasq-dns-675f4bcbfc-jv5sk\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.309636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.309766 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrh7x\" (UniqueName: \"kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst"
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.310753 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst"
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.311215 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst"
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.320941 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk"
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.324698 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrh7x\" (UniqueName: \"kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x\") pod \"dnsmasq-dns-78dd6ddcc-gsrst\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst"
Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.453968 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst"
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.760895 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:29:45 crc kubenswrapper[4909]: W1128 16:29:45.914449 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53f5f255_27cb_46e1_bc20_17b38ea707bf.slice/crio-647ef863c64c74be39b4b0862ef5a668ea05e97ef052990a934965b24d4b3370 WatchSource:0}: Error finding container 647ef863c64c74be39b4b0862ef5a668ea05e97ef052990a934965b24d4b3370: Status 404 returned error can't find the container with id 647ef863c64c74be39b4b0862ef5a668ea05e97ef052990a934965b24d4b3370 Nov 28 16:29:45 crc kubenswrapper[4909]: I1128 16:29:45.915307 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:29:46 crc kubenswrapper[4909]: I1128 16:29:46.768779 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" event={"ID":"53f5f255-27cb-46e1-bc20-17b38ea707bf","Type":"ContainerStarted","Data":"647ef863c64c74be39b4b0862ef5a668ea05e97ef052990a934965b24d4b3370"} Nov 28 16:29:46 crc kubenswrapper[4909]: I1128 16:29:46.769777 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" event={"ID":"3348422f-2e14-4c58-9170-d026c3fdbda8","Type":"ContainerStarted","Data":"46f49428dee1c64aa317b5c1dfc680111e9715e8f178078353eb4f0270ee5a11"} Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.537942 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.562953 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.564033 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.576596 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.639124 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.639185 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkqgx\" (UniqueName: \"kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.639208 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.740212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.740288 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkqgx\" (UniqueName: \"kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.740324 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.741385 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.741760 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.761092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkqgx\" (UniqueName: 
\"kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx\") pod \"dnsmasq-dns-5ccc8479f9-ddfj8\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.837877 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.863536 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.864729 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.878249 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.884028 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.950459 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.950504 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:47 crc kubenswrapper[4909]: I1128 16:29:47.950528 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z24l\" (UniqueName: \"kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.051919 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.051963 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.051997 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z24l\" (UniqueName: \"kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.056142 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.056281 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.074077 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z24l\" (UniqueName: \"kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l\") pod \"dnsmasq-dns-57d769cc4f-nwdnb\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.192028 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.436043 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:29:48 crc kubenswrapper[4909]: W1128 16:29:48.448183 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a392b74_7812_4347_988a_1eef8b2778a5.slice/crio-4ce76cb3f09f0a7a0b86a472bc3dc731006098a715cc810c865f125f481f371e WatchSource:0}: Error finding container 4ce76cb3f09f0a7a0b86a472bc3dc731006098a715cc810c865f125f481f371e: Status 404 returned error can't find the container with id 4ce76cb3f09f0a7a0b86a472bc3dc731006098a715cc810c865f125f481f371e Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.664779 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.711168 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.712301 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.720831 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.721014 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.721753 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.721871 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.721996 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.722153 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.722299 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.722311 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2sfzq" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.776903 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777144 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777169 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777254 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777285 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777305 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777331 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777375 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777396 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv7bn\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.777414 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.800143 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" event={"ID":"7a392b74-7812-4347-988a-1eef8b2778a5","Type":"ContainerStarted","Data":"4ce76cb3f09f0a7a0b86a472bc3dc731006098a715cc810c865f125f481f371e"} Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.802074 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" event={"ID":"d68fe850-7328-4415-b01e-a7c2ec3e1f32","Type":"ContainerStarted","Data":"34c0cd98d987c3a397be557827bfc1d9894d705d2f64e72a02d2fa1b3055196b"} Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879254 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879300 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879329 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879359 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv7bn\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879380 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879441 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879461 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879483 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879516 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.879608 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.880588 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.881019 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.882805 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.883382 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.883979 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.893665 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.893723 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.896070 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.896555 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv7bn\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0"
\"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.897291 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:48 crc kubenswrapper[4909]: I1128 16:29:48.916258 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.013283 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.016902 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.022051 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.022088 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.022251 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.022331 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.022433 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.023009 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-4g5mx" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.023204 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.030570 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.039980 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.081849 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.081898 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.081946 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.081966 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.081993 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.082033 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.082054 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.082124 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.082147 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc 
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.082238 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.183770 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.183837 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.183855 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.183902 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgs9l\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-kube-api-access-mgs9l\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.184606 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.183917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.184880 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185189 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0"
\"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185219 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185248 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185277 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185292 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185317 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185356 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185473 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.185473 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.186550 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.198437 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.203035 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.205142 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.205246 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.208410 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgs9l\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-kube-api-access-mgs9l\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.211214 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") " pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.354482 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.571455 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:29:49 crc kubenswrapper[4909]: W1128 16:29:49.633806 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02c83d05_a6ce_4c22_9015_91c0a766a518.slice/crio-da6cf49208ef158abb711f1af001d84826e34b08ee2102eedfbf8a66683bb8b6 WatchSource:0}: Error finding container da6cf49208ef158abb711f1af001d84826e34b08ee2102eedfbf8a66683bb8b6: Status 404 returned error can't find the container with id da6cf49208ef158abb711f1af001d84826e34b08ee2102eedfbf8a66683bb8b6 Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.716379 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:29:49 crc kubenswrapper[4909]: W1128 16:29:49.753008 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7eee3d9c_42a3_4d59_8a29_8d5f1cbc7444.slice/crio-d758f5bd74a408367d7310ae37d87089ee0f19c00277a74f6e04970d9f2bb8a2 WatchSource:0}: Error finding container d758f5bd74a408367d7310ae37d87089ee0f19c00277a74f6e04970d9f2bb8a2: Status 404 returned error can't find the container with id d758f5bd74a408367d7310ae37d87089ee0f19c00277a74f6e04970d9f2bb8a2 Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.818271 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerStarted","Data":"d758f5bd74a408367d7310ae37d87089ee0f19c00277a74f6e04970d9f2bb8a2"} Nov 28 16:29:49 crc kubenswrapper[4909]: I1128 16:29:49.820760 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerStarted","Data":"da6cf49208ef158abb711f1af001d84826e34b08ee2102eedfbf8a66683bb8b6"} Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.150537 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.163821 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.166465 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.169096 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.176149 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-n7tgg" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.178397 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.179363 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.187508 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.202599 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.202705 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215004 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215063 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215080 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m782j\" (UniqueName: \"kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215154 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215186 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.215203 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316611 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316650 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m782j\" (UniqueName: \"kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316721 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316736 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316757 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316783 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316841 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.316968 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.318156 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.318358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.320187 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.321494 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.324579 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.335317 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m782j\" (UniqueName: \"kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.343453 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.369084 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.524166 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:29:50 crc kubenswrapper[4909]: I1128 16:29:50.904737 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.274885 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.277706 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.279457 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-7bshv" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.280268 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.280456 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.281642 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.290475 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344148 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344193 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344219 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344371 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpgq7\" (UniqueName: \"kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344399 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344421 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.344438 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.445948 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.445987 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpgq7\" (UniqueName: \"kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446021 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446046 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446073 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446142 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446175 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446196 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.446394 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.447612 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.447656 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.448265 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.452872 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.455072 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.455371 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.467222 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod 
\"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.469052 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpgq7\" (UniqueName: \"kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7\") pod \"openstack-cell1-galera-0\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.597575 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.674919 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.676105 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.681958 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.682253 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-ndl2l" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.700867 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.705063 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.758392 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.758436 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.758522 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.758720 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0" Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.758787 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cd27\" (UniqueName: \"kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0" Nov 28 
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.856295 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerStarted","Data":"f7099d33e98225332f99451ed479e52ecd6fb45773030325a3b3d5ecb6f2af70"}
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.859773 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.859846 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.860698 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.860781 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.860982 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cd27\" (UniqueName: \"kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.861299 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.862389 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.868909 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.880396 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:51 crc kubenswrapper[4909]: I1128 16:29:51.894187 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cd27\" (UniqueName: \"kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27\") pod \"memcached-0\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " pod="openstack/memcached-0"
Nov 28 16:29:52 crc kubenswrapper[4909]: I1128 16:29:52.027809 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 16:29:52 crc kubenswrapper[4909]: I1128 16:29:52.252135 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 16:29:52 crc kubenswrapper[4909]: W1128 16:29:52.266985 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a8757b1_bbbf_4d00_9bbb_9f4bc855a9d9.slice/crio-1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce WatchSource:0}: Error finding container 1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce: Status 404 returned error can't find the container with id 1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce
Nov 28 16:29:52 crc kubenswrapper[4909]: I1128 16:29:52.682788 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 28 16:29:52 crc kubenswrapper[4909]: I1128 16:29:52.869413 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72f0e500-fe06-4373-9bc3-6cdaa2520043","Type":"ContainerStarted","Data":"8b9c4e7fc23f26be96a3901625cb22f61e05dc6d320f1ce226e9d753fd86554e"}
Nov 28 16:29:52 crc kubenswrapper[4909]: I1128 16:29:52.872338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerStarted","Data":"1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce"}
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.444975 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.447036 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.453385 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-jghd8"
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.469731 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.594055 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhlht\" (UniqueName: \"kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht\") pod \"kube-state-metrics-0\" (UID: \"9075f51d-3271-438e-b4e4-cf6ccf65a6eb\") " pod="openstack/kube-state-metrics-0"
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.695251 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhlht\" (UniqueName: \"kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht\") pod \"kube-state-metrics-0\" (UID: \"9075f51d-3271-438e-b4e4-cf6ccf65a6eb\") " pod="openstack/kube-state-metrics-0"
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.720128 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhlht\" (UniqueName: \"kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht\") pod \"kube-state-metrics-0\" (UID: \"9075f51d-3271-438e-b4e4-cf6ccf65a6eb\") " pod="openstack/kube-state-metrics-0"
Nov 28 16:29:53 crc kubenswrapper[4909]: I1128 16:29:53.782290 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.040846 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.042877 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.045258 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-gsnc9" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.045506 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.045673 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.046554 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.046869 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.051188 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.169719 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170076 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170137 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170208 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170247 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170291 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wtmw\" (UniqueName: \"kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170317 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.170349 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271623 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271726 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271748 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271787 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wtmw\" (UniqueName: \"kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271811 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271840 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271886 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.271907 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc 
kubenswrapper[4909]: I1128 16:29:57.272247 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.273989 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.274322 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.275091 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.278477 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.278546 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.284801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.297414 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wtmw\" (UniqueName: \"kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.304492 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.367934 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.855947 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.857164 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.866938 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-65t7j" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.867153 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.872051 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.872935 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"] Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.875023 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.879531 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.945620 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"] Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983235 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983295 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983318 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983372 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g" Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983410 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:29:57 crc kubenswrapper[4909]: 
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983446 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983462 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-478mm\" (UniqueName: \"kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983485 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983511 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983541 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983559 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983604 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x669m\" (UniqueName: \"kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:57 crc kubenswrapper[4909]: I1128 16:29:57.983634 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085206 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x669m\" (UniqueName: \"kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085255 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085280 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085306 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085322 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085345 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085369 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085396 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085414 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-478mm\" (UniqueName: \"kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085437 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085462 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085487 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.085504 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086313 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086341 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086363 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086386 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086478 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.086555 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.088158 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.090162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.091291 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.103045 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x669m\" (UniqueName: \"kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m\") pod \"ovn-controller-ovs-q2kt7\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") " pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.105092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-478mm\" (UniqueName: \"kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.168717 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts\") pod \"ovn-controller-6tr6g\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.212082 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g"
Nov 28 16:29:58 crc kubenswrapper[4909]: I1128 16:29:58.219891 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.152973 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"]
Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.154128 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.156578 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.156743 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.167822 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"] Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.327829 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65s4f\" (UniqueName: \"kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.327905 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.328091 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.429945 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.430024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65s4f\" (UniqueName: \"kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.430114 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.430822 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume\") pod 
\"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.440232 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.446182 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65s4f\" (UniqueName: \"kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f\") pod \"collect-profiles-29405790-d6zq2\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:00 crc kubenswrapper[4909]: I1128 16:30:00.475569 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.306256 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.308932 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.311133 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.311310 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-frps6" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.311473 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.312458 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.315293 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.448634 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449007 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449075 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q68xh\" (UniqueName: \"kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" 
Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449245 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449394 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449536 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449671 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.449879 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551634 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q68xh\" (UniqueName: \"kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551681 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551714 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551737 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551764 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551800 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.551821 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.552279 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.553056 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.553052 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.553594 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.558257 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.561616 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.562096 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.577153 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q68xh\" (UniqueName: \"kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.591498 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:01 crc kubenswrapper[4909]: I1128 16:30:01.637464 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.465843 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.466727 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zpgq7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},Restar
tPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.468737 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.510576 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.510787 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m782j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(0b1d1797-999d-4453-b674-c40f53d4231e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:13 crc kubenswrapper[4909]: E1128 16:30:13.512268 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" Nov 28 
16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.039141 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.039390 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.405063 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.405226 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mgs9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.406600 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.413579 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.413809 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sv7bn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(02c83d05-a6ce-4c22-9015-91c0a766a518): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:14 crc kubenswrapper[4909]: E1128 16:30:14.415037 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" Nov 28 16:30:15 crc kubenswrapper[4909]: E1128 16:30:15.044436 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" Nov 28 16:30:15 crc kubenswrapper[4909]: E1128 16:30:15.044723 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" Nov 28 16:30:18 crc kubenswrapper[4909]: E1128 16:30:18.537471 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 28 16:30:18 crc kubenswrapper[4909]: E1128 16:30:18.538061 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n594h648hd5h649h544h67ch8bh558hd6h658h56bh548hcch89h7h64ch57bhd8h695h66fhb6h657h554h654h66fh55ch584h9ch699h5d4hcfhcfq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5cd27,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(72f0e500-fe06-4373-9bc3-6cdaa2520043): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:18 crc kubenswrapper[4909]: E1128 16:30:18.539190 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/memcached-0" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.070709 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.208961 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.209608 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7z24l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-nwdnb_openstack(d68fe850-7328-4415-b01e-a7c2ec3e1f32): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.211153 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" podUID="d68fe850-7328-4415-b01e-a7c2ec3e1f32" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.223011 4909 
log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.223171 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfvlm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-jv5sk_openstack(3348422f-2e14-4c58-9170-d026c3fdbda8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.224267 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" podUID="3348422f-2e14-4c58-9170-d026c3fdbda8" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.258456 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.258777 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts 
--domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vkqgx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-ddfj8_openstack(7a392b74-7812-4347-988a-1eef8b2778a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.260131 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" podUID="7a392b74-7812-4347-988a-1eef8b2778a5" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.264692 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.264862 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wrh7x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-gsrst_openstack(53f5f255-27cb-46e1-bc20-17b38ea707bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:19 crc kubenswrapper[4909]: E1128 16:30:19.266692 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" podUID="53f5f255-27cb-46e1-bc20-17b38ea707bf" Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.644970 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:30:19 crc kubenswrapper[4909]: W1128 16:30:19.652701 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9075f51d_3271_438e_b4e4_cf6ccf65a6eb.slice/crio-b6999729045961873f3efe2c5e3d2c2962fa224c22914a18d74eeeb0be95a0d5 WatchSource:0}: Error finding container b6999729045961873f3efe2c5e3d2c2962fa224c22914a18d74eeeb0be95a0d5: Status 404 returned error can't find the container with id b6999729045961873f3efe2c5e3d2c2962fa224c22914a18d74eeeb0be95a0d5 Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.754345 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"] Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.770452 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:30:19 crc kubenswrapper[4909]: W1128 16:30:19.773736 4909 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14b66e32_a660_4643_9f57_f66bf12a56ef.slice/crio-4043cd901717c0c55a563dd5c052609a3e5039e2af5eb6426d0ad896e6c31fc9 WatchSource:0}: Error finding container 4043cd901717c0c55a563dd5c052609a3e5039e2af5eb6426d0ad896e6c31fc9: Status 404 returned error can't find the container with id 4043cd901717c0c55a563dd5c052609a3e5039e2af5eb6426d0ad896e6c31fc9 Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.792796 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"] Nov 28 16:30:19 crc kubenswrapper[4909]: W1128 16:30:19.803104 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff22194e_63a9_410d_80b6_9b1a1e68b164.slice/crio-175842d147817b5559190c7827d9da2f04b34520f781cd5e9ce0bfce76228679 WatchSource:0}: Error finding container 175842d147817b5559190c7827d9da2f04b34520f781cd5e9ce0bfce76228679: Status 404 returned error can't find the container with id 175842d147817b5559190c7827d9da2f04b34520f781cd5e9ce0bfce76228679 Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.857032 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:30:19 crc kubenswrapper[4909]: W1128 16:30:19.865830 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdf7de93_de28_49b8_b83f_1174c23bbd2f.slice/crio-5ee73e3a160a074a846c2f1633fd2786b1e1fdf857e8650508847fec4846e0a1 WatchSource:0}: Error finding container 5ee73e3a160a074a846c2f1633fd2786b1e1fdf857e8650508847fec4846e0a1: Status 404 returned error can't find the container with id 5ee73e3a160a074a846c2f1633fd2786b1e1fdf857e8650508847fec4846e0a1 Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.910677 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:30:19 crc kubenswrapper[4909]: I1128 16:30:19.910734 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.075732 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g" event={"ID":"14b66e32-a660-4643-9f57-f66bf12a56ef","Type":"ContainerStarted","Data":"4043cd901717c0c55a563dd5c052609a3e5039e2af5eb6426d0ad896e6c31fc9"} Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.077395 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" event={"ID":"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1","Type":"ContainerStarted","Data":"4ed4598a6df4a267eb842fe650e4d800b1e2e1739b94711a922bf86bb4b17b33"} Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.077430 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" event={"ID":"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1","Type":"ContainerStarted","Data":"61e72df3d37012d4d5c61f7eb0fc058aea795cfcc9ce30e591c32da487cc5b5d"} Nov 
28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.078795 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9075f51d-3271-438e-b4e4-cf6ccf65a6eb","Type":"ContainerStarted","Data":"b6999729045961873f3efe2c5e3d2c2962fa224c22914a18d74eeeb0be95a0d5"} Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.079815 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerStarted","Data":"175842d147817b5559190c7827d9da2f04b34520f781cd5e9ce0bfce76228679"} Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.081501 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerStarted","Data":"5ee73e3a160a074a846c2f1633fd2786b1e1fdf857e8650508847fec4846e0a1"} Nov 28 16:30:20 crc kubenswrapper[4909]: E1128 16:30:20.083101 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" podUID="d68fe850-7328-4415-b01e-a7c2ec3e1f32" Nov 28 16:30:20 crc kubenswrapper[4909]: E1128 16:30:20.083669 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" podUID="7a392b74-7812-4347-988a-1eef8b2778a5" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.100426 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" podStartSLOduration=20.10040634 podStartE2EDuration="20.10040634s" podCreationTimestamp="2025-11-28 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:20.09703586 +0000 UTC m=+1202.493720384" watchObservedRunningTime="2025-11-28 16:30:20.10040634 +0000 UTC m=+1202.497090864" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.607847 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.612986 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.774647 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config\") pod \"53f5f255-27cb-46e1-bc20-17b38ea707bf\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.774763 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrh7x\" (UniqueName: \"kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x\") pod \"53f5f255-27cb-46e1-bc20-17b38ea707bf\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.774803 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config\") pod \"3348422f-2e14-4c58-9170-d026c3fdbda8\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.774877 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-dns-svc\") pod \"53f5f255-27cb-46e1-bc20-17b38ea707bf\" (UID: \"53f5f255-27cb-46e1-bc20-17b38ea707bf\") " Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.775309 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config" (OuterVolumeSpecName: "config") pod "3348422f-2e14-4c58-9170-d026c3fdbda8" (UID: "3348422f-2e14-4c58-9170-d026c3fdbda8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.775441 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config" (OuterVolumeSpecName: "config") pod "53f5f255-27cb-46e1-bc20-17b38ea707bf" (UID: "53f5f255-27cb-46e1-bc20-17b38ea707bf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.775449 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "53f5f255-27cb-46e1-bc20-17b38ea707bf" (UID: "53f5f255-27cb-46e1-bc20-17b38ea707bf"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.775831 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfvlm\" (UniqueName: \"kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm\") pod \"3348422f-2e14-4c58-9170-d026c3fdbda8\" (UID: \"3348422f-2e14-4c58-9170-d026c3fdbda8\") " Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.776518 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.776540 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3348422f-2e14-4c58-9170-d026c3fdbda8-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.776554 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53f5f255-27cb-46e1-bc20-17b38ea707bf-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.779828 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm" (OuterVolumeSpecName: "kube-api-access-qfvlm") pod "3348422f-2e14-4c58-9170-d026c3fdbda8" (UID: "3348422f-2e14-4c58-9170-d026c3fdbda8"). InnerVolumeSpecName "kube-api-access-qfvlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.779888 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x" (OuterVolumeSpecName: "kube-api-access-wrh7x") pod "53f5f255-27cb-46e1-bc20-17b38ea707bf" (UID: "53f5f255-27cb-46e1-bc20-17b38ea707bf"). InnerVolumeSpecName "kube-api-access-wrh7x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.868579 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:30:20 crc kubenswrapper[4909]: W1128 16:30:20.871527 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a3a5941_5c86_4a65_be1e_26327ca990ad.slice/crio-c8b638f19b86187965ecc2b21a7ed972377396f1caac26c27dd389e1e14da07a WatchSource:0}: Error finding container c8b638f19b86187965ecc2b21a7ed972377396f1caac26c27dd389e1e14da07a: Status 404 returned error can't find the container with id c8b638f19b86187965ecc2b21a7ed972377396f1caac26c27dd389e1e14da07a Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.877988 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfvlm\" (UniqueName: \"kubernetes.io/projected/3348422f-2e14-4c58-9170-d026c3fdbda8-kube-api-access-qfvlm\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:20 crc kubenswrapper[4909]: I1128 16:30:20.878019 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrh7x\" (UniqueName: \"kubernetes.io/projected/53f5f255-27cb-46e1-bc20-17b38ea707bf-kube-api-access-wrh7x\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.096808 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" event={"ID":"3348422f-2e14-4c58-9170-d026c3fdbda8","Type":"ContainerDied","Data":"46f49428dee1c64aa317b5c1dfc680111e9715e8f178078353eb4f0270ee5a11"} Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.097304 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jv5sk" Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.101126 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" event={"ID":"53f5f255-27cb-46e1-bc20-17b38ea707bf","Type":"ContainerDied","Data":"647ef863c64c74be39b4b0862ef5a668ea05e97ef052990a934965b24d4b3370"} Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.101157 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gsrst" Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.103621 4909 generic.go:334] "Generic (PLEG): container finished" podID="8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" containerID="4ed4598a6df4a267eb842fe650e4d800b1e2e1739b94711a922bf86bb4b17b33" exitCode=0 Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.103720 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" event={"ID":"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1","Type":"ContainerDied","Data":"4ed4598a6df4a267eb842fe650e4d800b1e2e1739b94711a922bf86bb4b17b33"} Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.106408 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerStarted","Data":"c8b638f19b86187965ecc2b21a7ed972377396f1caac26c27dd389e1e14da07a"} Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.170239 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.180865 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gsrst"] Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.198321 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.205631 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jv5sk"] Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.911470 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3348422f-2e14-4c58-9170-d026c3fdbda8" path="/var/lib/kubelet/pods/3348422f-2e14-4c58-9170-d026c3fdbda8/volumes" Nov 28 16:30:21 crc kubenswrapper[4909]: I1128 16:30:21.911860 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53f5f255-27cb-46e1-bc20-17b38ea707bf" path="/var/lib/kubelet/pods/53f5f255-27cb-46e1-bc20-17b38ea707bf/volumes" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.658600 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.808770 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume\") pod \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.808912 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65s4f\" (UniqueName: \"kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f\") pod \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.808942 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume\") pod \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\" (UID: \"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1\") " Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.809669 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume" (OuterVolumeSpecName: "config-volume") pod "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" (UID: "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.814839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f" (OuterVolumeSpecName: "kube-api-access-65s4f") pod "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" (UID: "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1"). InnerVolumeSpecName "kube-api-access-65s4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.815691 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" (UID: "8e9cbe0d-d1db-420f-8a4c-1e24d559dba1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.911071 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.911102 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65s4f\" (UniqueName: \"kubernetes.io/projected/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-kube-api-access-65s4f\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:22 crc kubenswrapper[4909]: I1128 16:30:22.911113 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:23 crc kubenswrapper[4909]: I1128 16:30:23.122288 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" event={"ID":"8e9cbe0d-d1db-420f-8a4c-1e24d559dba1","Type":"ContainerDied","Data":"61e72df3d37012d4d5c61f7eb0fc058aea795cfcc9ce30e591c32da487cc5b5d"} Nov 28 16:30:23 crc kubenswrapper[4909]: I1128 16:30:23.122328 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61e72df3d37012d4d5c61f7eb0fc058aea795cfcc9ce30e591c32da487cc5b5d" Nov 28 16:30:23 crc kubenswrapper[4909]: I1128 16:30:23.122381 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2" Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.154109 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9075f51d-3271-438e-b4e4-cf6ccf65a6eb","Type":"ContainerStarted","Data":"b8c2b6121fa77c8742b8b9510e8ffcb85bed8675ecae8d86cce549aa45594ae3"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.154750 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.157879 4909 generic.go:334] "Generic (PLEG): container finished" podID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerID="4f5d8f239fd0e5782668fdf514cb48a3f0d94294be3f0d631b1c21d4439268b8" exitCode=0 Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.157977 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerDied","Data":"4f5d8f239fd0e5782668fdf514cb48a3f0d94294be3f0d631b1c21d4439268b8"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.160551 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerStarted","Data":"44d78f1f1841456ec290a5fd131e7e46dad4741dfefbdbd69b4571e60a9b775f"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.163783 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerStarted","Data":"f3031d38398a1299821a38dba71739cd77b4202204dd1cd5dd6367b419b14d7b"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.169608 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g" 
event={"ID":"14b66e32-a660-4643-9f57-f66bf12a56ef","Type":"ContainerStarted","Data":"dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.169778 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-6tr6g" Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.172950 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerStarted","Data":"6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6"} Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.178730 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=27.921200448 podStartE2EDuration="33.178706344s" podCreationTimestamp="2025-11-28 16:29:53 +0000 UTC" firstStartedPulling="2025-11-28 16:30:19.655169242 +0000 UTC m=+1202.051853766" lastFinishedPulling="2025-11-28 16:30:24.912675138 +0000 UTC m=+1207.309359662" observedRunningTime="2025-11-28 16:30:26.169142919 +0000 UTC m=+1208.565827443" watchObservedRunningTime="2025-11-28 16:30:26.178706344 +0000 UTC m=+1208.575390858" Nov 28 16:30:26 crc kubenswrapper[4909]: I1128 16:30:26.233347 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-6tr6g" podStartSLOduration=24.051694506 podStartE2EDuration="29.23332477s" podCreationTimestamp="2025-11-28 16:29:57 +0000 UTC" firstStartedPulling="2025-11-28 16:30:19.776765563 +0000 UTC m=+1202.173450087" lastFinishedPulling="2025-11-28 16:30:24.958395827 +0000 UTC m=+1207.355080351" observedRunningTime="2025-11-28 16:30:26.223123258 +0000 UTC m=+1208.619807782" watchObservedRunningTime="2025-11-28 16:30:26.23332477 +0000 UTC m=+1208.630009294" Nov 28 16:30:27 crc kubenswrapper[4909]: I1128 16:30:27.184672 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerStarted","Data":"afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4"} Nov 28 16:30:27 crc kubenswrapper[4909]: I1128 16:30:27.185279 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerStarted","Data":"96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435"} Nov 28 16:30:27 crc kubenswrapper[4909]: I1128 16:30:27.185691 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:30:27 crc kubenswrapper[4909]: I1128 16:30:27.185715 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:30:27 crc kubenswrapper[4909]: I1128 16:30:27.204970 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-q2kt7" podStartSLOduration=25.052423891 podStartE2EDuration="30.204953978s" podCreationTimestamp="2025-11-28 16:29:57 +0000 UTC" firstStartedPulling="2025-11-28 16:30:19.804996126 +0000 UTC m=+1202.201680650" lastFinishedPulling="2025-11-28 16:30:24.957526213 +0000 UTC m=+1207.354210737" observedRunningTime="2025-11-28 16:30:27.201331162 +0000 UTC m=+1209.598015686" watchObservedRunningTime="2025-11-28 16:30:27.204953978 +0000 UTC m=+1209.601638502" Nov 28 16:30:28 crc kubenswrapper[4909]: I1128 16:30:28.193530 4909 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerStarted","Data":"49cf443f1a213e1c0c384ac59bd266107cf92076be4dceb74f6ecba7e3ee0c82"} Nov 28 16:30:29 crc kubenswrapper[4909]: I1128 16:30:29.203572 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerStarted","Data":"11ac9a6b55dccdabe7e64c630e4c33e9f41f6c8c26d42b4e367805b2de03dbc8"} Nov 28 16:30:29 crc kubenswrapper[4909]: I1128 16:30:29.206501 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerStarted","Data":"8e03e0c2cfe2a76da7c2c9a3025b7a0ea43754fa7aeb32ae6610a67be0eb8a43"} Nov 28 16:30:29 crc kubenswrapper[4909]: I1128 16:30:29.208116 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerStarted","Data":"67c0edd657bacfc89509d50f3315a4b0e80b205b4d0aa611b83d2d7a21e317a6"} Nov 28 16:30:29 crc kubenswrapper[4909]: I1128 16:30:29.229067 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=20.399203594 podStartE2EDuration="29.229047569s" podCreationTimestamp="2025-11-28 16:30:00 +0000 UTC" firstStartedPulling="2025-11-28 16:30:19.868980541 +0000 UTC m=+1202.265665065" lastFinishedPulling="2025-11-28 16:30:28.698824516 +0000 UTC m=+1211.095509040" observedRunningTime="2025-11-28 16:30:29.22607127 +0000 UTC m=+1211.622755794" watchObservedRunningTime="2025-11-28 16:30:29.229047569 +0000 UTC m=+1211.625732093" Nov 28 16:30:29 crc kubenswrapper[4909]: I1128 16:30:29.265961 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=25.441327711 podStartE2EDuration="33.265945623s" podCreationTimestamp="2025-11-28 16:29:56 +0000 UTC" firstStartedPulling="2025-11-28 16:30:20.87443584 +0000 UTC m=+1203.271120364" lastFinishedPulling="2025-11-28 16:30:28.699053752 +0000 UTC m=+1211.095738276" observedRunningTime="2025-11-28 16:30:29.262414238 +0000 UTC m=+1211.659098762" watchObservedRunningTime="2025-11-28 16:30:29.265945623 +0000 UTC m=+1211.662630147" Nov 28 16:30:30 crc kubenswrapper[4909]: I1128 16:30:30.218874 4909 generic.go:334] "Generic (PLEG): container finished" podID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerID="44d78f1f1841456ec290a5fd131e7e46dad4741dfefbdbd69b4571e60a9b775f" exitCode=0 Nov 28 16:30:30 crc kubenswrapper[4909]: I1128 16:30:30.218958 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerDied","Data":"44d78f1f1841456ec290a5fd131e7e46dad4741dfefbdbd69b4571e60a9b775f"} Nov 28 16:30:30 crc kubenswrapper[4909]: I1128 16:30:30.221563 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerStarted","Data":"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71"} Nov 28 16:30:30 crc kubenswrapper[4909]: I1128 16:30:30.368779 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:30 crc kubenswrapper[4909]: I1128 16:30:30.408531 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.233025 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerStarted","Data":"d3f40b1a27b1ea548b445a46b92770d9795f4ed9429685b38f64c4173b4e0c3f"} Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.233385 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.262987 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.007011881 podStartE2EDuration="41.262972262s" podCreationTimestamp="2025-11-28 16:29:50 +0000 UTC" firstStartedPulling="2025-11-28 16:29:52.270008674 +0000 UTC m=+1174.666693198" lastFinishedPulling="2025-11-28 16:30:25.525969055 +0000 UTC m=+1207.922653579" observedRunningTime="2025-11-28 16:30:31.259619362 +0000 UTC m=+1213.656303896" watchObservedRunningTime="2025-11-28 16:30:31.262972262 +0000 UTC m=+1213.659656786" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.285293 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.541770 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.583586 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:31 crc kubenswrapper[4909]: E1128 16:30:31.584054 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.584077 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.584265 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.585337 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.588480 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.600362 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.602597 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.613484 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-fg65t"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.614514 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.617602 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.634550 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.635671 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fg65t"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.649124 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxd4r\" (UniqueName: \"kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.649173 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.649209 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.649283 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.650070 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.650636 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.707702 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750435 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750513 
4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750550 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750586 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxd4r\" (UniqueName: \"kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750678 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750717 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shm9n\" (UniqueName: \"kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.750763 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.751936 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc 
kubenswrapper[4909]: I1128 16:30:31.752951 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.753140 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.778839 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxd4r\" (UniqueName: \"kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r\") pod \"dnsmasq-dns-5bf47b49b7-n9gnh\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.852520 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.852939 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.852965 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.853266 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.853319 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.853816 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.853962 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.854012 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.854036 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shm9n\" (UniqueName: \"kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.863372 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.865606 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.882053 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.884586 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shm9n\" (UniqueName: \"kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n\") pod \"ovn-controller-metrics-fg65t\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") " pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.910590 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.928696 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.930251 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.931966 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.949031 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:30:31 crc kubenswrapper[4909]: I1128 16:30:31.956567 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.020184 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.056749 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.056793 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.056947 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.056992 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.057033 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cthd\" (UniqueName: \"kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.158446 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z24l\" (UniqueName: \"kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l\") pod \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.158492 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc\") pod \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.158513 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config\") pod \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\" (UID: \"d68fe850-7328-4415-b01e-a7c2ec3e1f32\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.158975 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config" (OuterVolumeSpecName: "config") pod "d68fe850-7328-4415-b01e-a7c2ec3e1f32" (UID: "d68fe850-7328-4415-b01e-a7c2ec3e1f32"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.159949 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.160028 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cthd\" (UniqueName: \"kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.160252 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d68fe850-7328-4415-b01e-a7c2ec3e1f32" (UID: "d68fe850-7328-4415-b01e-a7c2ec3e1f32"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.160512 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161223 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161395 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161435 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161446 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d68fe850-7328-4415-b01e-a7c2ec3e1f32-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161942 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.160768 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb\") pod 
\"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.161196 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.162484 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.164356 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l" (OuterVolumeSpecName: "kube-api-access-7z24l") pod "d68fe850-7328-4415-b01e-a7c2ec3e1f32" (UID: "d68fe850-7328-4415-b01e-a7c2ec3e1f32"). InnerVolumeSpecName "kube-api-access-7z24l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.190686 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cthd\" (UniqueName: \"kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd\") pod \"dnsmasq-dns-8554648995-fzkt7\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.213981 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.241986 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" event={"ID":"7a392b74-7812-4347-988a-1eef8b2778a5","Type":"ContainerDied","Data":"4ce76cb3f09f0a7a0b86a472bc3dc731006098a715cc810c865f125f481f371e"} Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.242068 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-ddfj8" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.244463 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.245150 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nwdnb" event={"ID":"d68fe850-7328-4415-b01e-a7c2ec3e1f32","Type":"ContainerDied","Data":"34c0cd98d987c3a397be557827bfc1d9894d705d2f64e72a02d2fa1b3055196b"} Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.264055 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkqgx\" (UniqueName: \"kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx\") pod \"7a392b74-7812-4347-988a-1eef8b2778a5\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.264134 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config\") pod \"7a392b74-7812-4347-988a-1eef8b2778a5\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.264193 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc\") pod \"7a392b74-7812-4347-988a-1eef8b2778a5\" (UID: \"7a392b74-7812-4347-988a-1eef8b2778a5\") " Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.264566 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z24l\" (UniqueName: \"kubernetes.io/projected/d68fe850-7328-4415-b01e-a7c2ec3e1f32-kube-api-access-7z24l\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.265064 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7a392b74-7812-4347-988a-1eef8b2778a5" (UID: "7a392b74-7812-4347-988a-1eef8b2778a5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.265319 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config" (OuterVolumeSpecName: "config") pod "7a392b74-7812-4347-988a-1eef8b2778a5" (UID: "7a392b74-7812-4347-988a-1eef8b2778a5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.268077 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx" (OuterVolumeSpecName: "kube-api-access-vkqgx") pod "7a392b74-7812-4347-988a-1eef8b2778a5" (UID: "7a392b74-7812-4347-988a-1eef8b2778a5"). InnerVolumeSpecName "kube-api-access-vkqgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.302922 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.315054 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.321914 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nwdnb"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.336506 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.376727 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkqgx\" (UniqueName: \"kubernetes.io/projected/7a392b74-7812-4347-988a-1eef8b2778a5-kube-api-access-vkqgx\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.376757 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.376767 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a392b74-7812-4347-988a-1eef8b2778a5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.442422 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.486969 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.488312 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.492370 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.492432 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.492503 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.492622 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-7kxw2" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.493613 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fg65t"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.499830 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580401 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580454 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg9tm\" (UniqueName: \"kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580490 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580523 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580553 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.580576 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.609097 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.615025 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-ddfj8"] Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682545 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682599 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682638 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg9tm\" (UniqueName: \"kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682686 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682717 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682740 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.682763 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.683840 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.684234 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.684493 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.687741 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.688947 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.689218 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.708723 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg9tm\" (UniqueName: \"kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm\") pod \"ovn-northd-0\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") " pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.804630 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:30:32 crc kubenswrapper[4909]: I1128 16:30:32.868167 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.262877 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fg65t" event={"ID":"f69e804c-fdc4-4b8f-86f3-d497612f42b8","Type":"ContainerStarted","Data":"31efa2915b2d01df78595f44826736718a1033b3247d499a7a9d8cc17106d2a1"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.263295 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fg65t" event={"ID":"f69e804c-fdc4-4b8f-86f3-d497612f42b8","Type":"ContainerStarted","Data":"4ac20f70f1463d6a742e63d0a2b9de508ef07b24cccd5e2683e667a4d6a7523f"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.271133 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerStarted","Data":"a5b4631021eb83d90f166de2e5c3405d003bf29289292c9c69743b468d2aae8a"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.273383 4909 generic.go:334] "Generic (PLEG): container finished" podID="0b1d1797-999d-4453-b674-c40f53d4231e" containerID="67c0edd657bacfc89509d50f3315a4b0e80b205b4d0aa611b83d2d7a21e317a6" exitCode=0 Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.273439 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerDied","Data":"67c0edd657bacfc89509d50f3315a4b0e80b205b4d0aa611b83d2d7a21e317a6"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.275562 4909 generic.go:334] "Generic (PLEG): container finished" podID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerID="1384e4ddb7eb867a4879271ef410e07de45b5dd06e94c39e463a1d465d609945" exitCode=0 Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.275812 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" event={"ID":"fd174f39-a477-40c2-aacb-1c854a0ac704","Type":"ContainerDied","Data":"1384e4ddb7eb867a4879271ef410e07de45b5dd06e94c39e463a1d465d609945"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.275864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" event={"ID":"fd174f39-a477-40c2-aacb-1c854a0ac704","Type":"ContainerStarted","Data":"e1fa268d204646e094f4934e1883b01a9a5557e5234c06fb8a9ccdf42b4530ed"} Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.294834 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-fg65t" podStartSLOduration=2.29481383 podStartE2EDuration="2.29481383s" podCreationTimestamp="2025-11-28 16:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:33.287177056 +0000 UTC m=+1215.683861580" watchObservedRunningTime="2025-11-28 16:30:33.29481383 +0000 UTC m=+1215.691498364" Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.324918 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:30:33 crc kubenswrapper[4909]: W1128 16:30:33.328991 4909 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc78dec8_567e_41a1_9fbf_793224410d3b.slice/crio-8af8397c30efca8e6a308b7609f31c2e06d62be6c080bb677b2b1e1b67d6dc77 WatchSource:0}: Error finding container 8af8397c30efca8e6a308b7609f31c2e06d62be6c080bb677b2b1e1b67d6dc77: Status 404 returned error can't find the container with id 8af8397c30efca8e6a308b7609f31c2e06d62be6c080bb677b2b1e1b67d6dc77 Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.788466 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.918381 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a392b74-7812-4347-988a-1eef8b2778a5" path="/var/lib/kubelet/pods/7a392b74-7812-4347-988a-1eef8b2778a5/volumes" Nov 28 16:30:33 crc kubenswrapper[4909]: I1128 16:30:33.918815 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d68fe850-7328-4415-b01e-a7c2ec3e1f32" path="/var/lib/kubelet/pods/d68fe850-7328-4415-b01e-a7c2ec3e1f32/volumes" Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.284338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerStarted","Data":"06e466dfb79a1bb06b9072c7cbb230045e51e4d4f27797b0be954d4fa29d493d"} Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.287382 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerStarted","Data":"219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b"} Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.289494 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" event={"ID":"fd174f39-a477-40c2-aacb-1c854a0ac704","Type":"ContainerStarted","Data":"ffef6c3b270fcdd6b9ec833d8e7bbd38628ee4682ed06beb35d30442a06affb9"} Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.289625 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.290706 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerStarted","Data":"8af8397c30efca8e6a308b7609f31c2e06d62be6c080bb677b2b1e1b67d6dc77"} Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.293238 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72f0e500-fe06-4373-9bc3-6cdaa2520043","Type":"ContainerStarted","Data":"9c0ed5eb3169a895fddc352403d70cbfcb4b590d019f84677427fdac6ec6cf71"} Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.322486 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371991.532312 podStartE2EDuration="45.322463942s" podCreationTimestamp="2025-11-28 16:29:49 +0000 UTC" firstStartedPulling="2025-11-28 16:29:50.923985817 +0000 UTC m=+1173.320670341" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:34.310100272 +0000 UTC m=+1216.706784796" watchObservedRunningTime="2025-11-28 16:30:34.322463942 +0000 UTC m=+1216.719148466" Nov 28 16:30:34 crc kubenswrapper[4909]: I1128 16:30:34.338488 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" podStartSLOduration=2.8110866100000003 podStartE2EDuration="3.338468258s" podCreationTimestamp="2025-11-28 16:30:31 +0000 UTC" firstStartedPulling="2025-11-28 16:30:32.458136548 +0000 UTC m=+1214.854821072" lastFinishedPulling="2025-11-28 16:30:32.985518196 +0000 UTC m=+1215.382202720" observedRunningTime="2025-11-28 16:30:34.332759386 +0000 UTC m=+1216.729443920" watchObservedRunningTime="2025-11-28 16:30:34.338468258 +0000 UTC m=+1216.735152782" Nov 28 16:30:34 crc kubenswrapper[4909]: E1128 16:30:34.488509 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod090022a6_59be_4353_8dd1_22e9e542c57a.slice/crio-conmon-06e466dfb79a1bb06b9072c7cbb230045e51e4d4f27797b0be954d4fa29d493d.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:30:35 crc kubenswrapper[4909]: I1128 16:30:35.301924 4909 generic.go:334] "Generic (PLEG): container finished" podID="090022a6-59be-4353-8dd1-22e9e542c57a" containerID="06e466dfb79a1bb06b9072c7cbb230045e51e4d4f27797b0be954d4fa29d493d" exitCode=0 Nov 28 16:30:35 crc kubenswrapper[4909]: I1128 16:30:35.305016 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerDied","Data":"06e466dfb79a1bb06b9072c7cbb230045e51e4d4f27797b0be954d4fa29d493d"} Nov 28 16:30:35 crc kubenswrapper[4909]: I1128 16:30:35.305062 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 16:30:35 crc kubenswrapper[4909]: I1128 16:30:35.332089 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.627300746 podStartE2EDuration="44.332066501s" podCreationTimestamp="2025-11-28 16:29:51 +0000 UTC" firstStartedPulling="2025-11-28 16:29:52.713563337 +0000 UTC m=+1175.110247851" lastFinishedPulling="2025-11-28 16:30:33.418329082 +0000 UTC m=+1215.815013606" observedRunningTime="2025-11-28 16:30:35.323255687 +0000 UTC m=+1217.719940241" watchObservedRunningTime="2025-11-28 16:30:35.332066501 +0000 UTC m=+1217.728751025" Nov 28 16:30:36 crc kubenswrapper[4909]: I1128 16:30:36.311993 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerStarted","Data":"e74af5111c8900c319129179dbac0afe759ed2f2adb26d89f96d3891c86f5643"} Nov 28 16:30:36 crc kubenswrapper[4909]: I1128 16:30:36.312622 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:36 crc kubenswrapper[4909]: I1128 16:30:36.314340 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerStarted","Data":"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d"} Nov 28 16:30:36 crc kubenswrapper[4909]: I1128 16:30:36.314376 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerStarted","Data":"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc"} Nov 28 16:30:36 crc kubenswrapper[4909]: I1128 16:30:36.329473 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-fzkt7" 
podStartSLOduration=4.657194959 podStartE2EDuration="5.329451647s" podCreationTimestamp="2025-11-28 16:30:31 +0000 UTC" firstStartedPulling="2025-11-28 16:30:32.88737018 +0000 UTC m=+1215.284054704" lastFinishedPulling="2025-11-28 16:30:33.559626868 +0000 UTC m=+1215.956311392" observedRunningTime="2025-11-28 16:30:36.327351031 +0000 UTC m=+1218.724035575" watchObservedRunningTime="2025-11-28 16:30:36.329451647 +0000 UTC m=+1218.726136171" Nov 28 16:30:37 crc kubenswrapper[4909]: I1128 16:30:37.320910 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 16:30:37 crc kubenswrapper[4909]: I1128 16:30:37.723358 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:37 crc kubenswrapper[4909]: I1128 16:30:37.746604 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.828809174 podStartE2EDuration="5.74657597s" podCreationTimestamp="2025-11-28 16:30:32 +0000 UTC" firstStartedPulling="2025-11-28 16:30:33.339899482 +0000 UTC m=+1215.736584006" lastFinishedPulling="2025-11-28 16:30:35.257666278 +0000 UTC m=+1217.654350802" observedRunningTime="2025-11-28 16:30:36.353120437 +0000 UTC m=+1218.749804971" watchObservedRunningTime="2025-11-28 16:30:37.74657597 +0000 UTC m=+1220.143260494" Nov 28 16:30:37 crc kubenswrapper[4909]: I1128 16:30:37.806439 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:40 crc kubenswrapper[4909]: I1128 16:30:40.524888 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 16:30:40 crc kubenswrapper[4909]: I1128 16:30:40.525262 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 16:30:40 crc kubenswrapper[4909]: I1128 16:30:40.599714 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.415145 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.693589 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-shkqq"] Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.694760 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.702142 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-shkqq"] Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.723331 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f14d-account-create-update-pr2mt"] Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.724479 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.726426 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.745106 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f14d-account-create-update-pr2mt"] Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.849416 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xm9m\" (UniqueName: \"kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.849486 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.849505 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlvrb\" (UniqueName: \"kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.849525 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.912808 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.951876 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xm9m\" (UniqueName: \"kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.952250 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.952277 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlvrb\" (UniqueName: \"kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" 
Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.952326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.953067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.953085 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.975158 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xm9m\" (UniqueName: \"kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m\") pod \"keystone-db-create-shkqq\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.980367 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlvrb\" (UniqueName: \"kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb\") pod \"keystone-f14d-account-create-update-pr2mt\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:41 crc kubenswrapper[4909]: I1128 16:30:41.999859 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-pd67x"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.004467 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.018117 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-3326-account-create-update-qqqzv"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.019231 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.019951 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.021135 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.030785 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.045991 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.047435 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pd67x"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.071508 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-3326-account-create-update-qqqzv"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.154945 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj6vh\" (UniqueName: \"kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.155059 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.155261 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.155291 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm9nc\" (UniqueName: \"kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.196807 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-9v966"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.201154 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.215325 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9v966"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.256770 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.256807 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm9nc\" (UniqueName: \"kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.256861 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gj6vh\" (UniqueName: \"kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.256896 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.257645 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.257689 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.276927 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj6vh\" (UniqueName: \"kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh\") pod \"placement-db-create-pd67x\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.298096 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm9nc\" (UniqueName: \"kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc\") pod \"placement-3326-account-create-update-qqqzv\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.304608 4909 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/glance-2631-account-create-update-kvmx8"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.305612 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.308436 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.311038 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-2631-account-create-update-kvmx8"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.341827 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.357942 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.357985 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xbvk\" (UniqueName: \"kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.421281 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.421519 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="dnsmasq-dns" containerID="cri-o://ffef6c3b270fcdd6b9ec833d8e7bbd38628ee4682ed06beb35d30442a06affb9" gracePeriod=10 Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.455591 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-pd67x" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.461069 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4rd5\" (UniqueName: \"kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.461345 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.461479 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xbvk\" (UniqueName: \"kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.461696 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.463454 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.464309 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.482297 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xbvk\" (UniqueName: \"kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk\") pod \"glance-db-create-9v966\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.588193 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.588305 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4rd5\" (UniqueName: \"kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.589106 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.589267 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9v966" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.616239 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4rd5\" (UniqueName: \"kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5\") pod \"glance-2631-account-create-update-kvmx8\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.625904 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.645962 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-shkqq"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.725824 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f14d-account-create-update-pr2mt"] Nov 28 16:30:42 crc kubenswrapper[4909]: I1128 16:30:42.968109 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pd67x"] Nov 28 16:30:42 crc kubenswrapper[4909]: W1128 16:30:42.969607 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe6ec85b_3437_458a_9e53_6464138dbcea.slice/crio-65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28 WatchSource:0}: Error finding container 65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28: Status 404 returned error can't find the container with id 65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28 Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.134254 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-3326-account-create-update-qqqzv"] Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.229705 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9v966"] Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.306650 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-2631-account-create-update-kvmx8"] Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.373434 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9v966" event={"ID":"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0","Type":"ContainerStarted","Data":"3168b5567312a3d8a09e6f005ec5e30300fa8edb2b1823f692bd74b2c83278a6"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.383372 4909 generic.go:334] "Generic (PLEG): container finished" podID="22576912-638d-46ee-ad3e-9d78debb719e" containerID="b15e6ee9a424c1a2201ed8acab650a198cf7fac5fbe51d039543b682ba20f5b0" exitCode=0 Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.383467 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-shkqq" event={"ID":"22576912-638d-46ee-ad3e-9d78debb719e","Type":"ContainerDied","Data":"b15e6ee9a424c1a2201ed8acab650a198cf7fac5fbe51d039543b682ba20f5b0"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.383496 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-shkqq" event={"ID":"22576912-638d-46ee-ad3e-9d78debb719e","Type":"ContainerStarted","Data":"7c1244198a1c04e82b37bbc4a3a40bf4b42cb974e1649b7af12cae2ad63b6f93"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.387444 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f14d-account-create-update-pr2mt" event={"ID":"59f1690b-72e4-431e-b207-77553fd936f2","Type":"ContainerStarted","Data":"ad3df5afbfd584781834b3bb93b0448fff4b3864ed8ef602a45c14c6bbecf55b"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.387517 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f14d-account-create-update-pr2mt" event={"ID":"59f1690b-72e4-431e-b207-77553fd936f2","Type":"ContainerStarted","Data":"4cfa9770ddab9037ed452705fab240080db51e00b660536f76c75a806f9de09a"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.393537 4909 generic.go:334] 
"Generic (PLEG): container finished" podID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerID="ffef6c3b270fcdd6b9ec833d8e7bbd38628ee4682ed06beb35d30442a06affb9" exitCode=0 Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.393778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" event={"ID":"fd174f39-a477-40c2-aacb-1c854a0ac704","Type":"ContainerDied","Data":"ffef6c3b270fcdd6b9ec833d8e7bbd38628ee4682ed06beb35d30442a06affb9"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.408289 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pd67x" event={"ID":"be6ec85b-3437-458a-9e53-6464138dbcea","Type":"ContainerStarted","Data":"e765eee4154b9a239a1ea2aab2c7f476eaed3191104b1030d1738062b69d97f7"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.408338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pd67x" event={"ID":"be6ec85b-3437-458a-9e53-6464138dbcea","Type":"ContainerStarted","Data":"65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.414710 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-f14d-account-create-update-pr2mt" podStartSLOduration=2.41469334 podStartE2EDuration="2.41469334s" podCreationTimestamp="2025-11-28 16:30:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:43.413021356 +0000 UTC m=+1225.809705890" watchObservedRunningTime="2025-11-28 16:30:43.41469334 +0000 UTC m=+1225.811377864" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.425102 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3326-account-create-update-qqqzv" event={"ID":"be57306e-a9e9-45f4-8660-f5e1515799f8","Type":"ContainerStarted","Data":"12d46648e7055abe059dce638209aee315cee75a6172151b18b68d530276d87f"} Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.437056 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-pd67x" podStartSLOduration=2.437038796 podStartE2EDuration="2.437038796s" podCreationTimestamp="2025-11-28 16:30:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:43.43195812 +0000 UTC m=+1225.828642674" watchObservedRunningTime="2025-11-28 16:30:43.437038796 +0000 UTC m=+1225.833723320" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.450215 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-3326-account-create-update-qqqzv" podStartSLOduration=2.450191846 podStartE2EDuration="2.450191846s" podCreationTimestamp="2025-11-28 16:30:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:43.447185616 +0000 UTC m=+1225.843870150" watchObservedRunningTime="2025-11-28 16:30:43.450191846 +0000 UTC m=+1225.846876370" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.463994 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.522467 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxd4r\" (UniqueName: \"kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r\") pod \"fd174f39-a477-40c2-aacb-1c854a0ac704\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.522507 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc\") pod \"fd174f39-a477-40c2-aacb-1c854a0ac704\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.522635 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb\") pod \"fd174f39-a477-40c2-aacb-1c854a0ac704\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.522716 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config\") pod \"fd174f39-a477-40c2-aacb-1c854a0ac704\" (UID: \"fd174f39-a477-40c2-aacb-1c854a0ac704\") " Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.537030 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r" (OuterVolumeSpecName: "kube-api-access-rxd4r") pod "fd174f39-a477-40c2-aacb-1c854a0ac704" (UID: "fd174f39-a477-40c2-aacb-1c854a0ac704"). InnerVolumeSpecName "kube-api-access-rxd4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.624505 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxd4r\" (UniqueName: \"kubernetes.io/projected/fd174f39-a477-40c2-aacb-1c854a0ac704-kube-api-access-rxd4r\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.659930 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fd174f39-a477-40c2-aacb-1c854a0ac704" (UID: "fd174f39-a477-40c2-aacb-1c854a0ac704"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.663875 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fd174f39-a477-40c2-aacb-1c854a0ac704" (UID: "fd174f39-a477-40c2-aacb-1c854a0ac704"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.664329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config" (OuterVolumeSpecName: "config") pod "fd174f39-a477-40c2-aacb-1c854a0ac704" (UID: "fd174f39-a477-40c2-aacb-1c854a0ac704"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.725493 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.725524 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.725535 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd174f39-a477-40c2-aacb-1c854a0ac704-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.763478 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"] Nov 28 16:30:43 crc kubenswrapper[4909]: E1128 16:30:43.767263 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="init" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.767300 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="init" Nov 28 16:30:43 crc kubenswrapper[4909]: E1128 16:30:43.767319 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="dnsmasq-dns" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.767327 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="dnsmasq-dns" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.767593 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" containerName="dnsmasq-dns" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.779436 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.794093 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"] Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.827370 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.827410 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.827797 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6n8h\" (UniqueName: \"kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.827877 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.827933 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.929584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6n8h\" (UniqueName: \"kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.929631 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.929687 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.929742 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.929759 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.930466 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.933489 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.934147 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.939118 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:43 crc kubenswrapper[4909]: I1128 16:30:43.993173 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6n8h\" (UniqueName: \"kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h\") pod \"dnsmasq-dns-b8fbc5445-rvm96\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") " pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.118202 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.439023 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" event={"ID":"fd174f39-a477-40c2-aacb-1c854a0ac704","Type":"ContainerDied","Data":"e1fa268d204646e094f4934e1883b01a9a5557e5234c06fb8a9ccdf42b4530ed"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.439320 4909 scope.go:117] "RemoveContainer" containerID="ffef6c3b270fcdd6b9ec833d8e7bbd38628ee4682ed06beb35d30442a06affb9" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.439458 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-n9gnh" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.445005 4909 generic.go:334] "Generic (PLEG): container finished" podID="be6ec85b-3437-458a-9e53-6464138dbcea" containerID="e765eee4154b9a239a1ea2aab2c7f476eaed3191104b1030d1738062b69d97f7" exitCode=0 Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.445091 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pd67x" event={"ID":"be6ec85b-3437-458a-9e53-6464138dbcea","Type":"ContainerDied","Data":"e765eee4154b9a239a1ea2aab2c7f476eaed3191104b1030d1738062b69d97f7"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.455956 4909 generic.go:334] "Generic (PLEG): container finished" podID="a91c37b7-2346-47e6-b462-a48c06d30017" containerID="f7d9e81b5c5bad1caa908ec31490b084c28a10b2175c027689445d8ecc258612" exitCode=0 Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.456058 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2631-account-create-update-kvmx8" event={"ID":"a91c37b7-2346-47e6-b462-a48c06d30017","Type":"ContainerDied","Data":"f7d9e81b5c5bad1caa908ec31490b084c28a10b2175c027689445d8ecc258612"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.456084 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2631-account-create-update-kvmx8" event={"ID":"a91c37b7-2346-47e6-b462-a48c06d30017","Type":"ContainerStarted","Data":"08662a34e6a7632b9f31266c591c8d7f86efe5a78e9ee8a6ac07433bd37a6764"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.462163 4909 generic.go:334] "Generic (PLEG): container finished" podID="be57306e-a9e9-45f4-8660-f5e1515799f8" containerID="285b7816902378eb6aafb34755baa7d6f7517f3859e339349c4092091bf2462d" exitCode=0 Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.462241 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3326-account-create-update-qqqzv" event={"ID":"be57306e-a9e9-45f4-8660-f5e1515799f8","Type":"ContainerDied","Data":"285b7816902378eb6aafb34755baa7d6f7517f3859e339349c4092091bf2462d"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.463911 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.465230 4909 generic.go:334] "Generic (PLEG): container finished" podID="9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" containerID="50a2302af69a187d0d31ed9cd28408c4635015a121cf1e21481bf48f95da59e6" exitCode=0 Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.465275 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9v966" event={"ID":"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0","Type":"ContainerDied","Data":"50a2302af69a187d0d31ed9cd28408c4635015a121cf1e21481bf48f95da59e6"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.466901 4909 generic.go:334] "Generic (PLEG): container finished" podID="59f1690b-72e4-431e-b207-77553fd936f2" containerID="ad3df5afbfd584781834b3bb93b0448fff4b3864ed8ef602a45c14c6bbecf55b" exitCode=0 Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.467131 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f14d-account-create-update-pr2mt" event={"ID":"59f1690b-72e4-431e-b207-77553fd936f2","Type":"ContainerDied","Data":"ad3df5afbfd584781834b3bb93b0448fff4b3864ed8ef602a45c14c6bbecf55b"} Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.469928 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-5bf47b49b7-n9gnh"] Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.473919 4909 scope.go:117] "RemoveContainer" containerID="1384e4ddb7eb867a4879271ef410e07de45b5dd06e94c39e463a1d465d609945" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.634867 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"] Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.841732 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.867778 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:30:44 crc kubenswrapper[4909]: E1128 16:30:44.868245 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22576912-638d-46ee-ad3e-9d78debb719e" containerName="mariadb-database-create" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.868268 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="22576912-638d-46ee-ad3e-9d78debb719e" containerName="mariadb-database-create" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.868510 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="22576912-638d-46ee-ad3e-9d78debb719e" containerName="mariadb-database-create" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.873180 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.880145 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.880474 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.880571 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.880856 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-spz2l" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.896829 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.955205 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts\") pod \"22576912-638d-46ee-ad3e-9d78debb719e\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.955345 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xm9m\" (UniqueName: \"kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m\") pod \"22576912-638d-46ee-ad3e-9d78debb719e\" (UID: \"22576912-638d-46ee-ad3e-9d78debb719e\") " Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.956050 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "22576912-638d-46ee-ad3e-9d78debb719e" (UID: "22576912-638d-46ee-ad3e-9d78debb719e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:44 crc kubenswrapper[4909]: I1128 16:30:44.959683 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m" (OuterVolumeSpecName: "kube-api-access-4xm9m") pod "22576912-638d-46ee-ad3e-9d78debb719e" (UID: "22576912-638d-46ee-ad3e-9d78debb719e"). InnerVolumeSpecName "kube-api-access-4xm9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.057471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nnbx\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.057827 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.057855 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.057903 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.058488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.058890 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xm9m\" (UniqueName: \"kubernetes.io/projected/22576912-638d-46ee-ad3e-9d78debb719e-kube-api-access-4xm9m\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.058913 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22576912-638d-46ee-ad3e-9d78debb719e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160071 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nnbx\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx\") pod 
\"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160225 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160253 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160293 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.160321 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.160358 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.160417 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:30:45.6603969 +0000 UTC m=+1228.057081524 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160725 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.160810 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.161129 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.181233 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nnbx\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.186119 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.459087 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-cpxgj"] Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.460172 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.463235 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.463630 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.466249 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.470866 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-cpxgj"] Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.478455 4909 generic.go:334] "Generic (PLEG): container finished" podID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerID="c5fdc1d67f914e9abb01dcd6e33bd0e89f9d463fd4794c90aeab2d33ad4d7c1d" exitCode=0 Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.478533 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" event={"ID":"524d2697-3bb0-4029-a2d2-1211f9fc8fc5","Type":"ContainerDied","Data":"c5fdc1d67f914e9abb01dcd6e33bd0e89f9d463fd4794c90aeab2d33ad4d7c1d"} Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.478565 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" event={"ID":"524d2697-3bb0-4029-a2d2-1211f9fc8fc5","Type":"ContainerStarted","Data":"565c6cdaeaf6293fc468cfbfe40deb4bffee19abdd0660464f10d8dfd775d628"} Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.485799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-shkqq" event={"ID":"22576912-638d-46ee-ad3e-9d78debb719e","Type":"ContainerDied","Data":"7c1244198a1c04e82b37bbc4a3a40bf4b42cb974e1649b7af12cae2ad63b6f93"} Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.485901 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c1244198a1c04e82b37bbc4a3a40bf4b42cb974e1649b7af12cae2ad63b6f93" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.486096 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-shkqq" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.573576 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghjck\" (UniqueName: \"kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.573695 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.573755 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.573939 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.573993 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.574045 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.574091 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.675782 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676051 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: 
\"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676081 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676178 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676221 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676258 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676294 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.676345 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghjck\" (UniqueName: \"kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.677708 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.677726 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: E1128 16:30:45.677826 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:30:46.67779473 +0000 UTC m=+1229.074479254 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.678236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.680641 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.680972 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.682707 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.685516 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.688113 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.696961 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghjck\" (UniqueName: \"kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck\") pod \"swift-ring-rebalance-cpxgj\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.780513 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.794722 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-pd67x" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.879921 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gj6vh\" (UniqueName: \"kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh\") pod \"be6ec85b-3437-458a-9e53-6464138dbcea\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.879999 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts\") pod \"be6ec85b-3437-458a-9e53-6464138dbcea\" (UID: \"be6ec85b-3437-458a-9e53-6464138dbcea\") " Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.881899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "be6ec85b-3437-458a-9e53-6464138dbcea" (UID: "be6ec85b-3437-458a-9e53-6464138dbcea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.885579 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh" (OuterVolumeSpecName: "kube-api-access-gj6vh") pod "be6ec85b-3437-458a-9e53-6464138dbcea" (UID: "be6ec85b-3437-458a-9e53-6464138dbcea"). InnerVolumeSpecName "kube-api-access-gj6vh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.936694 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd174f39-a477-40c2-aacb-1c854a0ac704" path="/var/lib/kubelet/pods/fd174f39-a477-40c2-aacb-1c854a0ac704/volumes" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.954893 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-9v966" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.981905 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gj6vh\" (UniqueName: \"kubernetes.io/projected/be6ec85b-3437-458a-9e53-6464138dbcea-kube-api-access-gj6vh\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:45 crc kubenswrapper[4909]: I1128 16:30:45.981940 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be6ec85b-3437-458a-9e53-6464138dbcea-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.083456 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts\") pod \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.083726 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xbvk\" (UniqueName: \"kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk\") pod \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\" (UID: \"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.084330 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" (UID: "9369b072-ffb6-4fe8-bd5b-ed55bd086ca0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.099212 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk" (OuterVolumeSpecName: "kube-api-access-2xbvk") pod "9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" (UID: "9369b072-ffb6-4fe8-bd5b-ed55bd086ca0"). InnerVolumeSpecName "kube-api-access-2xbvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.190498 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.190538 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xbvk\" (UniqueName: \"kubernetes.io/projected/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0-kube-api-access-2xbvk\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.284193 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.290951 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.300399 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392532 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts\") pod \"be57306e-a9e9-45f4-8660-f5e1515799f8\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392589 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlvrb\" (UniqueName: \"kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb\") pod \"59f1690b-72e4-431e-b207-77553fd936f2\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts\") pod \"59f1690b-72e4-431e-b207-77553fd936f2\" (UID: \"59f1690b-72e4-431e-b207-77553fd936f2\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392758 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts\") pod \"a91c37b7-2346-47e6-b462-a48c06d30017\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392787 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4rd5\" (UniqueName: \"kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5\") pod \"a91c37b7-2346-47e6-b462-a48c06d30017\" (UID: \"a91c37b7-2346-47e6-b462-a48c06d30017\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.392834 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm9nc\" (UniqueName: \"kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc\") pod \"be57306e-a9e9-45f4-8660-f5e1515799f8\" (UID: \"be57306e-a9e9-45f4-8660-f5e1515799f8\") " Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.393518 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "be57306e-a9e9-45f4-8660-f5e1515799f8" (UID: "be57306e-a9e9-45f4-8660-f5e1515799f8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.394161 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59f1690b-72e4-431e-b207-77553fd936f2" (UID: "59f1690b-72e4-431e-b207-77553fd936f2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.394332 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a91c37b7-2346-47e6-b462-a48c06d30017" (UID: "a91c37b7-2346-47e6-b462-a48c06d30017"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.397550 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb" (OuterVolumeSpecName: "kube-api-access-rlvrb") pod "59f1690b-72e4-431e-b207-77553fd936f2" (UID: "59f1690b-72e4-431e-b207-77553fd936f2"). InnerVolumeSpecName "kube-api-access-rlvrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.410634 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5" (OuterVolumeSpecName: "kube-api-access-q4rd5") pod "a91c37b7-2346-47e6-b462-a48c06d30017" (UID: "a91c37b7-2346-47e6-b462-a48c06d30017"). InnerVolumeSpecName "kube-api-access-q4rd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.417123 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc" (OuterVolumeSpecName: "kube-api-access-hm9nc") pod "be57306e-a9e9-45f4-8660-f5e1515799f8" (UID: "be57306e-a9e9-45f4-8660-f5e1515799f8"). InnerVolumeSpecName "kube-api-access-hm9nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.446024 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-cpxgj"] Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494514 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a91c37b7-2346-47e6-b462-a48c06d30017-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494905 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4rd5\" (UniqueName: \"kubernetes.io/projected/a91c37b7-2346-47e6-b462-a48c06d30017-kube-api-access-q4rd5\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494923 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm9nc\" (UniqueName: \"kubernetes.io/projected/be57306e-a9e9-45f4-8660-f5e1515799f8-kube-api-access-hm9nc\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494936 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be57306e-a9e9-45f4-8660-f5e1515799f8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494948 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlvrb\" (UniqueName: \"kubernetes.io/projected/59f1690b-72e4-431e-b207-77553fd936f2-kube-api-access-rlvrb\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.494959 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f1690b-72e4-431e-b207-77553fd936f2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.496566 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pd67x" 
event={"ID":"be6ec85b-3437-458a-9e53-6464138dbcea","Type":"ContainerDied","Data":"65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.496610 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65de1d1c954878cf5828aca50bb05f8bad969b0a11242d59a46dcffd2eb65f28" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.496708 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pd67x" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.501014 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2631-account-create-update-kvmx8" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.501038 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2631-account-create-update-kvmx8" event={"ID":"a91c37b7-2346-47e6-b462-a48c06d30017","Type":"ContainerDied","Data":"08662a34e6a7632b9f31266c591c8d7f86efe5a78e9ee8a6ac07433bd37a6764"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.501091 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08662a34e6a7632b9f31266c591c8d7f86efe5a78e9ee8a6ac07433bd37a6764" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.502946 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-3326-account-create-update-qqqzv" event={"ID":"be57306e-a9e9-45f4-8660-f5e1515799f8","Type":"ContainerDied","Data":"12d46648e7055abe059dce638209aee315cee75a6172151b18b68d530276d87f"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.502984 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12d46648e7055abe059dce638209aee315cee75a6172151b18b68d530276d87f" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.503063 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-3326-account-create-update-qqqzv" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.513453 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9v966" event={"ID":"9369b072-ffb6-4fe8-bd5b-ed55bd086ca0","Type":"ContainerDied","Data":"3168b5567312a3d8a09e6f005ec5e30300fa8edb2b1823f692bd74b2c83278a6"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.513506 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3168b5567312a3d8a09e6f005ec5e30300fa8edb2b1823f692bd74b2c83278a6" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.513593 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9v966" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.522246 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f14d-account-create-update-pr2mt" event={"ID":"59f1690b-72e4-431e-b207-77553fd936f2","Type":"ContainerDied","Data":"4cfa9770ddab9037ed452705fab240080db51e00b660536f76c75a806f9de09a"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.522281 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4cfa9770ddab9037ed452705fab240080db51e00b660536f76c75a806f9de09a" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.522306 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f14d-account-create-update-pr2mt" Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.523161 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cpxgj" event={"ID":"828d440a-4ae3-45ef-83fe-79866f3f2d8e","Type":"ContainerStarted","Data":"4b064ea81f33b7e1e3e0982e6711aa8c83d0b1fcb53a0f2d5e5fab4c651bb470"} Nov 28 16:30:46 crc kubenswrapper[4909]: I1128 16:30:46.697550 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:46 crc kubenswrapper[4909]: E1128 16:30:46.697815 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:30:46 crc kubenswrapper[4909]: E1128 16:30:46.697836 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:30:46 crc kubenswrapper[4909]: E1128 16:30:46.697878 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:30:48.69786447 +0000 UTC m=+1231.094548994 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.553549 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wz42m"] Nov 28 16:30:47 crc kubenswrapper[4909]: E1128 16:30:47.554222 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be6ec85b-3437-458a-9e53-6464138dbcea" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554237 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="be6ec85b-3437-458a-9e53-6464138dbcea" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: E1128 16:30:47.554244 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f1690b-72e4-431e-b207-77553fd936f2" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554250 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f1690b-72e4-431e-b207-77553fd936f2" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: E1128 16:30:47.554263 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554269 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: E1128 16:30:47.554279 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be57306e-a9e9-45f4-8660-f5e1515799f8" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554285 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="be57306e-a9e9-45f4-8660-f5e1515799f8" 
containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: E1128 16:30:47.554296 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a91c37b7-2346-47e6-b462-a48c06d30017" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554302 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a91c37b7-2346-47e6-b462-a48c06d30017" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554444 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="be6ec85b-3437-458a-9e53-6464138dbcea" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554458 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="be57306e-a9e9-45f4-8660-f5e1515799f8" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554471 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" containerName="mariadb-database-create" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554482 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="59f1690b-72e4-431e-b207-77553fd936f2" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.554490 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a91c37b7-2346-47e6-b462-a48c06d30017" containerName="mariadb-account-create-update" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.555003 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.557625 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.557687 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fzds2" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.572508 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wz42m"] Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.712575 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.712644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.712684 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.712735 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-xlwv7\" (UniqueName: \"kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.814446 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.814503 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.814584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlwv7\" (UniqueName: \"kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.814715 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.819708 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.819813 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.823073 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.832560 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlwv7\" (UniqueName: \"kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7\") pod \"glance-db-sync-wz42m\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.873219 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wz42m" Nov 28 16:30:47 crc kubenswrapper[4909]: I1128 16:30:47.886508 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.470526 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wz42m"] Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.540480 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" event={"ID":"524d2697-3bb0-4029-a2d2-1211f9fc8fc5","Type":"ContainerStarted","Data":"bff5a818f773719ec61196a5ef335b3aa8b2c1fdb3309845fc51ffaa68703ed1"} Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.540566 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.543052 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wz42m" event={"ID":"ff8acb59-a082-400b-a87a-f4ef8cfa22f4","Type":"ContainerStarted","Data":"e146670826e947f0a408e143e9a3e99bc52d0ed0c66e68477dda9f02999c5a43"} Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.582360 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" podStartSLOduration=5.58234242 podStartE2EDuration="5.58234242s" podCreationTimestamp="2025-11-28 16:30:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:48.558538256 +0000 UTC m=+1230.955222780" watchObservedRunningTime="2025-11-28 16:30:48.58234242 +0000 UTC m=+1230.979026954" Nov 28 16:30:48 crc kubenswrapper[4909]: I1128 16:30:48.736136 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:48 crc kubenswrapper[4909]: E1128 16:30:48.736365 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:30:48 crc kubenswrapper[4909]: E1128 16:30:48.736534 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:30:48 crc kubenswrapper[4909]: E1128 16:30:48.736603 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:30:52.736583742 +0000 UTC m=+1235.133268266 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:30:49 crc kubenswrapper[4909]: I1128 16:30:49.910769 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:30:49 crc kubenswrapper[4909]: I1128 16:30:49.910823 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:30:52 crc kubenswrapper[4909]: I1128 16:30:52.821043 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:30:52 crc kubenswrapper[4909]: E1128 16:30:52.821290 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:30:52 crc kubenswrapper[4909]: E1128 16:30:52.821790 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:30:52 crc kubenswrapper[4909]: E1128 16:30:52.821865 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:31:00.821842732 +0000 UTC m=+1243.218527266 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:30:54 crc kubenswrapper[4909]: I1128 16:30:54.120476 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" Nov 28 16:30:54 crc kubenswrapper[4909]: I1128 16:30:54.175936 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:30:54 crc kubenswrapper[4909]: I1128 16:30:54.176205 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-fzkt7" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" containerID="cri-o://e74af5111c8900c319129179dbac0afe759ed2f2adb26d89f96d3891c86f5643" gracePeriod=10 Nov 28 16:30:54 crc kubenswrapper[4909]: I1128 16:30:54.600391 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cpxgj" event={"ID":"828d440a-4ae3-45ef-83fe-79866f3f2d8e","Type":"ContainerStarted","Data":"e6cc5cc31656fc880bbdc23b2063b2d818555caa73caf69f28bf8decd88df8aa"} Nov 28 16:30:55 crc kubenswrapper[4909]: I1128 16:30:55.617750 4909 generic.go:334] "Generic (PLEG): container finished" podID="090022a6-59be-4353-8dd1-22e9e542c57a" containerID="e74af5111c8900c319129179dbac0afe759ed2f2adb26d89f96d3891c86f5643" exitCode=0 Nov 28 16:30:55 crc kubenswrapper[4909]: I1128 16:30:55.618200 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerDied","Data":"e74af5111c8900c319129179dbac0afe759ed2f2adb26d89f96d3891c86f5643"} Nov 28 16:30:55 crc kubenswrapper[4909]: I1128 16:30:55.634056 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-cpxgj" podStartSLOduration=5.789122068 podStartE2EDuration="10.634039004s" podCreationTimestamp="2025-11-28 16:30:45 +0000 UTC" firstStartedPulling="2025-11-28 16:30:46.451330909 +0000 UTC m=+1228.848015433" lastFinishedPulling="2025-11-28 16:30:51.296247855 +0000 UTC m=+1233.692932369" observedRunningTime="2025-11-28 16:30:55.630711254 +0000 UTC m=+1238.027395778" watchObservedRunningTime="2025-11-28 16:30:55.634039004 +0000 UTC m=+1238.030723528" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.255093 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" probeResult="failure" output=< Nov 28 16:30:58 crc kubenswrapper[4909]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 16:30:58 crc kubenswrapper[4909]: > Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.257769 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.264346 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-q2kt7" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.495963 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6tr6g-config-2hdz9"] Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.498265 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.501253 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.506134 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6tr6g-config-2hdz9"] Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.624927 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.624988 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.625155 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.625207 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.625250 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4phqm\" (UniqueName: \"kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.625294 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.726743 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.726814 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn\") 
pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.726865 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4phqm\" (UniqueName: \"kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.726916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.726989 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.727010 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.727138 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.727193 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.727138 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.727861 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.729975 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts\") pod \"ovn-controller-6tr6g-config-2hdz9\" 
(UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.748516 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4phqm\" (UniqueName: \"kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm\") pod \"ovn-controller-6tr6g-config-2hdz9\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:30:58 crc kubenswrapper[4909]: I1128 16:30:58.872228 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:31:00 crc kubenswrapper[4909]: I1128 16:31:00.666063 4909 generic.go:334] "Generic (PLEG): container finished" podID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerID="49cf443f1a213e1c0c384ac59bd266107cf92076be4dceb74f6ecba7e3ee0c82" exitCode=0 Nov 28 16:31:00 crc kubenswrapper[4909]: I1128 16:31:00.666135 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerDied","Data":"49cf443f1a213e1c0c384ac59bd266107cf92076be4dceb74f6ecba7e3ee0c82"} Nov 28 16:31:00 crc kubenswrapper[4909]: I1128 16:31:00.905000 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:31:00 crc kubenswrapper[4909]: E1128 16:31:00.905305 4909 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:00 crc kubenswrapper[4909]: E1128 16:31:00.905322 4909 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:00 crc kubenswrapper[4909]: E1128 16:31:00.905378 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift podName:af476a0f-b390-443d-b7a5-14181e7c7bc7 nodeName:}" failed. No retries permitted until 2025-11-28 16:31:16.905360285 +0000 UTC m=+1259.302044809 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift") pod "swift-storage-0" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7") : configmap "swift-ring-files" not found Nov 28 16:31:02 crc kubenswrapper[4909]: I1128 16:31:02.338049 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-fzkt7" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: i/o timeout" Nov 28 16:31:02 crc kubenswrapper[4909]: I1128 16:31:02.684375 4909 generic.go:334] "Generic (PLEG): container finished" podID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerID="d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71" exitCode=0 Nov 28 16:31:02 crc kubenswrapper[4909]: I1128 16:31:02.684450 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerDied","Data":"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71"} Nov 28 16:31:03 crc kubenswrapper[4909]: I1128 16:31:03.251885 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" probeResult="failure" output=< Nov 28 16:31:03 crc kubenswrapper[4909]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 16:31:03 crc kubenswrapper[4909]: > Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.050778 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.067273 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb\") pod \"090022a6-59be-4353-8dd1-22e9e542c57a\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.067357 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cthd\" (UniqueName: \"kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd\") pod \"090022a6-59be-4353-8dd1-22e9e542c57a\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.067431 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config\") pod \"090022a6-59be-4353-8dd1-22e9e542c57a\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.067526 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc\") pod \"090022a6-59be-4353-8dd1-22e9e542c57a\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.067637 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb\") pod \"090022a6-59be-4353-8dd1-22e9e542c57a\" (UID: \"090022a6-59be-4353-8dd1-22e9e542c57a\") " Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 
16:31:04.082702 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd" (OuterVolumeSpecName: "kube-api-access-9cthd") pod "090022a6-59be-4353-8dd1-22e9e542c57a" (UID: "090022a6-59be-4353-8dd1-22e9e542c57a"). InnerVolumeSpecName "kube-api-access-9cthd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.122374 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "090022a6-59be-4353-8dd1-22e9e542c57a" (UID: "090022a6-59be-4353-8dd1-22e9e542c57a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.125222 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config" (OuterVolumeSpecName: "config") pod "090022a6-59be-4353-8dd1-22e9e542c57a" (UID: "090022a6-59be-4353-8dd1-22e9e542c57a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.136945 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "090022a6-59be-4353-8dd1-22e9e542c57a" (UID: "090022a6-59be-4353-8dd1-22e9e542c57a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.141907 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "090022a6-59be-4353-8dd1-22e9e542c57a" (UID: "090022a6-59be-4353-8dd1-22e9e542c57a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.169391 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.169699 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.169711 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.169721 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/090022a6-59be-4353-8dd1-22e9e542c57a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.169731 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cthd\" (UniqueName: \"kubernetes.io/projected/090022a6-59be-4353-8dd1-22e9e542c57a-kube-api-access-9cthd\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.404156 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6tr6g-config-2hdz9"] Nov 28 16:31:04 crc kubenswrapper[4909]: W1128 16:31:04.583606 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b173ebc_3835_4705_8c96_8712cc817e1c.slice/crio-00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3 WatchSource:0}: Error finding container 00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3: Status 404 returned error can't find the container with id 00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3 Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.702058 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g-config-2hdz9" event={"ID":"4b173ebc-3835-4705-8c96-8712cc817e1c","Type":"ContainerStarted","Data":"00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3"} Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.705043 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerStarted","Data":"e404e875f3c6c8a15d87ad24861803ab1e659ac087607f8971106d0d6890fc63"} Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.706058 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.708565 4909 generic.go:334] "Generic (PLEG): container finished" podID="828d440a-4ae3-45ef-83fe-79866f3f2d8e" containerID="e6cc5cc31656fc880bbdc23b2063b2d818555caa73caf69f28bf8decd88df8aa" exitCode=0 Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.708631 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cpxgj" event={"ID":"828d440a-4ae3-45ef-83fe-79866f3f2d8e","Type":"ContainerDied","Data":"e6cc5cc31656fc880bbdc23b2063b2d818555caa73caf69f28bf8decd88df8aa"} Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.712617 4909 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerStarted","Data":"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266"} Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.712866 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.716571 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fzkt7" event={"ID":"090022a6-59be-4353-8dd1-22e9e542c57a","Type":"ContainerDied","Data":"a5b4631021eb83d90f166de2e5c3405d003bf29289292c9c69743b468d2aae8a"} Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.716630 4909 scope.go:117] "RemoveContainer" containerID="e74af5111c8900c319129179dbac0afe759ed2f2adb26d89f96d3891c86f5643" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.716834 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fzkt7" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.741916 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=40.776155283 podStartE2EDuration="1m17.741895477s" podCreationTimestamp="2025-11-28 16:29:47 +0000 UTC" firstStartedPulling="2025-11-28 16:29:49.658771613 +0000 UTC m=+1172.055456137" lastFinishedPulling="2025-11-28 16:30:26.624511807 +0000 UTC m=+1209.021196331" observedRunningTime="2025-11-28 16:31:04.740833819 +0000 UTC m=+1247.137518363" watchObservedRunningTime="2025-11-28 16:31:04.741895477 +0000 UTC m=+1247.138580001" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.772934 4909 scope.go:117] "RemoveContainer" containerID="06e466dfb79a1bb06b9072c7cbb230045e51e4d4f27797b0be954d4fa29d493d" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.788415 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371959.06638 podStartE2EDuration="1m17.788395676s" podCreationTimestamp="2025-11-28 16:29:47 +0000 UTC" firstStartedPulling="2025-11-28 16:29:49.755752158 +0000 UTC m=+1172.152436702" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:04.784302166 +0000 UTC m=+1247.180986690" watchObservedRunningTime="2025-11-28 16:31:04.788395676 +0000 UTC m=+1247.185080200" Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.812357 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:31:04 crc kubenswrapper[4909]: I1128 16:31:04.834616 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fzkt7"] Nov 28 16:31:05 crc kubenswrapper[4909]: I1128 16:31:05.731010 4909 generic.go:334] "Generic (PLEG): container finished" podID="4b173ebc-3835-4705-8c96-8712cc817e1c" containerID="221a2a3a02a2a8fb9553609d5de48c8286407e498f136ee370834da2d6b9d893" exitCode=0 Nov 28 16:31:05 crc kubenswrapper[4909]: I1128 16:31:05.731057 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g-config-2hdz9" event={"ID":"4b173ebc-3835-4705-8c96-8712cc817e1c","Type":"ContainerDied","Data":"221a2a3a02a2a8fb9553609d5de48c8286407e498f136ee370834da2d6b9d893"} Nov 28 16:31:05 crc kubenswrapper[4909]: I1128 16:31:05.734383 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wz42m" 
event={"ID":"ff8acb59-a082-400b-a87a-f4ef8cfa22f4","Type":"ContainerStarted","Data":"e54825a33d457bd02ac5f8d82396b60ba6d36114687450eaeb63fbb7ee2b93b3"} Nov 28 16:31:05 crc kubenswrapper[4909]: I1128 16:31:05.847902 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wz42m" podStartSLOduration=2.6319964110000003 podStartE2EDuration="18.84788082s" podCreationTimestamp="2025-11-28 16:30:47 +0000 UTC" firstStartedPulling="2025-11-28 16:30:48.47654644 +0000 UTC m=+1230.873230964" lastFinishedPulling="2025-11-28 16:31:04.692430849 +0000 UTC m=+1247.089115373" observedRunningTime="2025-11-28 16:31:05.843883323 +0000 UTC m=+1248.240567847" watchObservedRunningTime="2025-11-28 16:31:05.84788082 +0000 UTC m=+1248.244565344" Nov 28 16:31:05 crc kubenswrapper[4909]: I1128 16:31:05.933108 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" path="/var/lib/kubelet/pods/090022a6-59be-4353-8dd1-22e9e542c57a/volumes" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.221961 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325560 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325678 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghjck\" (UniqueName: \"kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325754 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325813 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325840 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325874 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.325902 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts\") pod \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\" (UID: \"828d440a-4ae3-45ef-83fe-79866f3f2d8e\") " Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.326414 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.326531 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.340134 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck" (OuterVolumeSpecName: "kube-api-access-ghjck") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "kube-api-access-ghjck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.341060 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.352385 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.357913 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts" (OuterVolumeSpecName: "scripts") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.370300 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "828d440a-4ae3-45ef-83fe-79866f3f2d8e" (UID: "828d440a-4ae3-45ef-83fe-79866f3f2d8e"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428001 4909 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/828d440a-4ae3-45ef-83fe-79866f3f2d8e-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428052 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghjck\" (UniqueName: \"kubernetes.io/projected/828d440a-4ae3-45ef-83fe-79866f3f2d8e-kube-api-access-ghjck\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428064 4909 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428079 4909 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428113 4909 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428122 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/828d440a-4ae3-45ef-83fe-79866f3f2d8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.428130 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/828d440a-4ae3-45ef-83fe-79866f3f2d8e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.742258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cpxgj" event={"ID":"828d440a-4ae3-45ef-83fe-79866f3f2d8e","Type":"ContainerDied","Data":"4b064ea81f33b7e1e3e0982e6711aa8c83d0b1fcb53a0f2d5e5fab4c651bb470"} Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.742318 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b064ea81f33b7e1e3e0982e6711aa8c83d0b1fcb53a0f2d5e5fab4c651bb470" Nov 28 16:31:06 crc kubenswrapper[4909]: I1128 16:31:06.742377 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-cpxgj" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.031077 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.137777 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.137876 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4phqm\" (UniqueName: \"kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.137898 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.137925 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.138005 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.138032 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts\") pod \"4b173ebc-3835-4705-8c96-8712cc817e1c\" (UID: \"4b173ebc-3835-4705-8c96-8712cc817e1c\") " Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.139412 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts" (OuterVolumeSpecName: "scripts") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.139453 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run" (OuterVolumeSpecName: "var-run") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.140088 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.140086 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.140247 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.144353 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm" (OuterVolumeSpecName: "kube-api-access-4phqm") pod "4b173ebc-3835-4705-8c96-8712cc817e1c" (UID: "4b173ebc-3835-4705-8c96-8712cc817e1c"). InnerVolumeSpecName "kube-api-access-4phqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.239932 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4phqm\" (UniqueName: \"kubernetes.io/projected/4b173ebc-3835-4705-8c96-8712cc817e1c-kube-api-access-4phqm\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.239970 4909 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.239984 4909 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.239996 4909 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.240007 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b173ebc-3835-4705-8c96-8712cc817e1c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.240018 4909 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4b173ebc-3835-4705-8c96-8712cc817e1c-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.338490 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-fzkt7" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: i/o timeout" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.750838 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g-config-2hdz9" 
event={"ID":"4b173ebc-3835-4705-8c96-8712cc817e1c","Type":"ContainerDied","Data":"00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3"} Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.750874 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g-config-2hdz9" Nov 28 16:31:07 crc kubenswrapper[4909]: I1128 16:31:07.750881 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00de9b97682e0413e911c992ec451c4f24c90495d5d7f173cf6d4c09615326a3" Nov 28 16:31:08 crc kubenswrapper[4909]: I1128 16:31:08.150533 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6tr6g-config-2hdz9"] Nov 28 16:31:08 crc kubenswrapper[4909]: I1128 16:31:08.157757 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-6tr6g-config-2hdz9"] Nov 28 16:31:08 crc kubenswrapper[4909]: I1128 16:31:08.261579 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-6tr6g" Nov 28 16:31:09 crc kubenswrapper[4909]: I1128 16:31:09.921016 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b173ebc-3835-4705-8c96-8712cc817e1c" path="/var/lib/kubelet/pods/4b173ebc-3835-4705-8c96-8712cc817e1c/volumes" Nov 28 16:31:13 crc kubenswrapper[4909]: I1128 16:31:13.797116 4909 generic.go:334] "Generic (PLEG): container finished" podID="ff8acb59-a082-400b-a87a-f4ef8cfa22f4" containerID="e54825a33d457bd02ac5f8d82396b60ba6d36114687450eaeb63fbb7ee2b93b3" exitCode=0 Nov 28 16:31:13 crc kubenswrapper[4909]: I1128 16:31:13.797212 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wz42m" event={"ID":"ff8acb59-a082-400b-a87a-f4ef8cfa22f4","Type":"ContainerDied","Data":"e54825a33d457bd02ac5f8d82396b60ba6d36114687450eaeb63fbb7ee2b93b3"} Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.242047 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wz42m" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.367153 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle\") pod \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.367288 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data\") pod \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.367318 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlwv7\" (UniqueName: \"kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7\") pod \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.367374 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data\") pod \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\" (UID: \"ff8acb59-a082-400b-a87a-f4ef8cfa22f4\") " Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.372982 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7" (OuterVolumeSpecName: "kube-api-access-xlwv7") pod "ff8acb59-a082-400b-a87a-f4ef8cfa22f4" (UID: "ff8acb59-a082-400b-a87a-f4ef8cfa22f4"). InnerVolumeSpecName "kube-api-access-xlwv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.373051 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ff8acb59-a082-400b-a87a-f4ef8cfa22f4" (UID: "ff8acb59-a082-400b-a87a-f4ef8cfa22f4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.412077 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff8acb59-a082-400b-a87a-f4ef8cfa22f4" (UID: "ff8acb59-a082-400b-a87a-f4ef8cfa22f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.428999 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data" (OuterVolumeSpecName: "config-data") pod "ff8acb59-a082-400b-a87a-f4ef8cfa22f4" (UID: "ff8acb59-a082-400b-a87a-f4ef8cfa22f4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.469166 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.469203 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlwv7\" (UniqueName: \"kubernetes.io/projected/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-kube-api-access-xlwv7\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.469216 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.469227 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff8acb59-a082-400b-a87a-f4ef8cfa22f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.817740 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wz42m" event={"ID":"ff8acb59-a082-400b-a87a-f4ef8cfa22f4","Type":"ContainerDied","Data":"e146670826e947f0a408e143e9a3e99bc52d0ed0c66e68477dda9f02999c5a43"} Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.818117 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e146670826e947f0a408e143e9a3e99bc52d0ed0c66e68477dda9f02999c5a43" Nov 28 16:31:15 crc kubenswrapper[4909]: I1128 16:31:15.818204 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wz42m" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192350 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:16 crc kubenswrapper[4909]: E1128 16:31:16.192679 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="828d440a-4ae3-45ef-83fe-79866f3f2d8e" containerName="swift-ring-rebalance" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192693 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="828d440a-4ae3-45ef-83fe-79866f3f2d8e" containerName="swift-ring-rebalance" Nov 28 16:31:16 crc kubenswrapper[4909]: E1128 16:31:16.192707 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="init" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192713 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="init" Nov 28 16:31:16 crc kubenswrapper[4909]: E1128 16:31:16.192726 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b173ebc-3835-4705-8c96-8712cc817e1c" containerName="ovn-config" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192732 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b173ebc-3835-4705-8c96-8712cc817e1c" containerName="ovn-config" Nov 28 16:31:16 crc kubenswrapper[4909]: E1128 16:31:16.192746 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8acb59-a082-400b-a87a-f4ef8cfa22f4" containerName="glance-db-sync" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192752 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8acb59-a082-400b-a87a-f4ef8cfa22f4" containerName="glance-db-sync" Nov 28 16:31:16 crc kubenswrapper[4909]: E1128 16:31:16.192765 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192771 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192903 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b173ebc-3835-4705-8c96-8712cc817e1c" containerName="ovn-config" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192921 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="828d440a-4ae3-45ef-83fe-79866f3f2d8e" containerName="swift-ring-rebalance" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192934 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff8acb59-a082-400b-a87a-f4ef8cfa22f4" containerName="glance-db-sync" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.192944 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="090022a6-59be-4353-8dd1-22e9e542c57a" containerName="dnsmasq-dns" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.216580 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.216908 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.282114 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.282166 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.282210 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.282488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.282720 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wkp2\" (UniqueName: \"kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.383785 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wkp2\" (UniqueName: \"kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.383853 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.383878 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.383916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: 
\"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.383999 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.384855 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.385060 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.385073 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.385517 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.403897 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wkp2\" (UniqueName: \"kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2\") pod \"dnsmasq-dns-74dc88fc-cvh5p\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.533951 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.976472 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:31:16 crc kubenswrapper[4909]: I1128 16:31:16.982120 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"swift-storage-0\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") " pod="openstack/swift-storage-0" Nov 28 16:31:17 crc kubenswrapper[4909]: I1128 16:31:17.056179 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 16:31:17 crc kubenswrapper[4909]: I1128 16:31:17.107443 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:17 crc kubenswrapper[4909]: W1128 16:31:17.128482 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod886ced6c_0930_40c8_903f_16146cd8994e.slice/crio-67430a65bc0e8c8ff0643e4dc80c56469446a2cdd2b6de4ce93ab1f9af1abd67 WatchSource:0}: Error finding container 67430a65bc0e8c8ff0643e4dc80c56469446a2cdd2b6de4ce93ab1f9af1abd67: Status 404 returned error can't find the container with id 67430a65bc0e8c8ff0643e4dc80c56469446a2cdd2b6de4ce93ab1f9af1abd67 Nov 28 16:31:17 crc kubenswrapper[4909]: I1128 16:31:17.607558 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:31:17 crc kubenswrapper[4909]: W1128 16:31:17.610603 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf476a0f_b390_443d_b7a5_14181e7c7bc7.slice/crio-0f7cc4d922dbf0a99e60ac3a4ea3ccc6245ef275c02bfb082139cd45e859a584 WatchSource:0}: Error finding container 0f7cc4d922dbf0a99e60ac3a4ea3ccc6245ef275c02bfb082139cd45e859a584: Status 404 returned error can't find the container with id 0f7cc4d922dbf0a99e60ac3a4ea3ccc6245ef275c02bfb082139cd45e859a584 Nov 28 16:31:17 crc kubenswrapper[4909]: I1128 16:31:17.837241 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"0f7cc4d922dbf0a99e60ac3a4ea3ccc6245ef275c02bfb082139cd45e859a584"} Nov 28 16:31:17 crc kubenswrapper[4909]: I1128 16:31:17.840354 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" event={"ID":"886ced6c-0930-40c8-903f-16146cd8994e","Type":"ContainerStarted","Data":"67430a65bc0e8c8ff0643e4dc80c56469446a2cdd2b6de4ce93ab1f9af1abd67"} Nov 28 16:31:18 crc kubenswrapper[4909]: I1128 16:31:18.851582 4909 generic.go:334] "Generic (PLEG): container finished" podID="886ced6c-0930-40c8-903f-16146cd8994e" containerID="dbdadc1bcc34558d89af786f93a444efc87620a0c8ac8d55f5db6d1716d267fe" exitCode=0 Nov 28 16:31:18 crc kubenswrapper[4909]: I1128 16:31:18.851699 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" event={"ID":"886ced6c-0930-40c8-903f-16146cd8994e","Type":"ContainerDied","Data":"dbdadc1bcc34558d89af786f93a444efc87620a0c8ac8d55f5db6d1716d267fe"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.045063 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.362779 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.860985 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.861046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.861062 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.861072 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.862677 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" event={"ID":"886ced6c-0930-40c8-903f-16146cd8994e","Type":"ContainerStarted","Data":"9ad9179f1c4cb74cc6246d262eaeb8f0011cd0e7b11bdc7780a08e8b6bba596e"} Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.862947 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.880860 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" podStartSLOduration=3.880836801 podStartE2EDuration="3.880836801s" podCreationTimestamp="2025-11-28 16:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:19.878514298 +0000 UTC m=+1262.275198842" watchObservedRunningTime="2025-11-28 16:31:19.880836801 +0000 UTC m=+1262.277521325" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.910664 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.910718 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.910755 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.911326 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:31:19 crc kubenswrapper[4909]: I1128 16:31:19.911381 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" 
containerID="cri-o://938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd" gracePeriod=600 Nov 28 16:31:20 crc kubenswrapper[4909]: I1128 16:31:20.872882 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd" exitCode=0 Nov 28 16:31:20 crc kubenswrapper[4909]: I1128 16:31:20.873020 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd"} Nov 28 16:31:20 crc kubenswrapper[4909]: I1128 16:31:20.874341 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9"} Nov 28 16:31:20 crc kubenswrapper[4909]: I1128 16:31:20.874366 4909 scope.go:117] "RemoveContainer" containerID="a1d1b89b71acf3efad2ebc7b1465d76f5e0a096e6a7cd92cd5f8be9dcf1f258e" Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.025309 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-lqx45"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.032429 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lqx45" Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.045672 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-lqx45"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.055771 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-9799-account-create-update-w49wd"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.056871 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9799-account-create-update-w49wd" Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.058523 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.074764 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-9799-account-create-update-w49wd"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.135380 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-937d-account-create-update-bmpjc"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.136444 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-937d-account-create-update-bmpjc" Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.138401 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-vlgkn"] Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.139708 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.145631 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-937d-account-create-update-bmpjc"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.147606 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m6wt\" (UniqueName: \"kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.147719 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.147773 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb2xv\" (UniqueName: \"kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.147801 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.150192 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.166741 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vlgkn"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248771 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248845 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb2xv\" (UniqueName: \"kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248867 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248896 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248977 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6p6\" (UniqueName: \"kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.248998 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sj4t\" (UniqueName: \"kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.249025 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m6wt\" (UniqueName: \"kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.249201 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.249562 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.249795 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.266553 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb2xv\" (UniqueName: \"kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv\") pod \"barbican-9799-account-create-update-w49wd\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") " pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.267519 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m6wt\" (UniqueName: \"kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt\") pod \"barbican-db-create-lqx45\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") " pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.297433 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-xhz8l"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.298421 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.314506 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.314749 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.315827 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.324080 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fmn76"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.329992 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xhz8l"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.350222 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.350542 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.350771 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6p6\" (UniqueName: \"kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.350875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sj4t\" (UniqueName: \"kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.351019 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.351302 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.351524 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.377835 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.386641 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r6p6\" (UniqueName: \"kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6\") pod \"cinder-937d-account-create-update-bmpjc\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") " pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.384273 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sj4t\" (UniqueName: \"kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t\") pod \"cinder-db-create-vlgkn\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") " pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.435948 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-fhbph"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.437148 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.448720 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8770-account-create-update-tphgw"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.449951 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.459371 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fhbph"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.459889 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.460440 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.463865 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.463992 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knpnt\" (UniqueName: \"kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.464045 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.464237 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.465097 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8770-account-create-update-tphgw"]
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565064 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565115 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dghsk\" (UniqueName: \"kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565148 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565426 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565537 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zznk\" (UniqueName: \"kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.565663 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knpnt\" (UniqueName: \"kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.569560 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.569758 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.580903 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knpnt\" (UniqueName: \"kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt\") pod \"keystone-db-sync-xhz8l\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.667750 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zznk\" (UniqueName: \"kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.667898 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dghsk\" (UniqueName: \"kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.667936 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.668018 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.668978 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.668993 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.674787 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xhz8l"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.693271 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zznk\" (UniqueName: \"kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk\") pod \"neutron-8770-account-create-update-tphgw\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") " pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.708979 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dghsk\" (UniqueName: \"kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk\") pod \"neutron-db-create-fhbph\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") " pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.807363 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:21 crc kubenswrapper[4909]: I1128 16:31:21.816899 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:22 crc kubenswrapper[4909]: I1128 16:31:22.903061 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-lqx45"]
Nov 28 16:31:22 crc kubenswrapper[4909]: I1128 16:31:22.913800 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-937d-account-create-update-bmpjc"]
Nov 28 16:31:22 crc kubenswrapper[4909]: I1128 16:31:22.923173 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-9799-account-create-update-w49wd"]
Nov 28 16:31:22 crc kubenswrapper[4909]: W1128 16:31:22.934505 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4740f8ad_ec12_40b8_b3eb_f22149cbd07a.slice/crio-fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057 WatchSource:0}: Error finding container fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057: Status 404 returned error can't find the container with id fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.092371 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vlgkn"]
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.098994 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8770-account-create-update-tphgw"]
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.105111 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fhbph"]
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.110401 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xhz8l"]
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.914923 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lqx45" event={"ID":"7648e31f-ca65-4302-903a-abf1fe2aa860","Type":"ContainerStarted","Data":"924605355daa8ab67ecac293d88b22db1eb3b8cec72a07d4296acbe05fea16d3"}
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.915452 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-937d-account-create-update-bmpjc" event={"ID":"469389c1-482e-4968-b969-163e760e87f2","Type":"ContainerStarted","Data":"84e17004bba773c50b178d09c34a694cd3d7b7bf06eab02c65f250c1c73e78d4"}
Nov 28 16:31:23 crc kubenswrapper[4909]: I1128 16:31:23.915467 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9799-account-create-update-w49wd" event={"ID":"4740f8ad-ec12-40b8-b3eb-f22149cbd07a","Type":"ContainerStarted","Data":"fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057"}
Nov 28 16:31:24 crc kubenswrapper[4909]: I1128 16:31:24.922599 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlgkn" event={"ID":"b561f9ee-f192-4215-a832-f60fd675206e","Type":"ContainerStarted","Data":"5bbd03a7dc98d3cfb0c4fa7717a727685303a174c020d3ad73cd58240a227c2c"}
Nov 28 16:31:24 crc kubenswrapper[4909]: I1128 16:31:24.924675 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8770-account-create-update-tphgw" event={"ID":"9384b65d-c575-4479-99be-eb182b848bb5","Type":"ContainerStarted","Data":"f164922848dff90cf87137347b41dacad6a4873166ea264e5e1b883cfae804bc"}
Nov 28 16:31:24 crc kubenswrapper[4909]: I1128 16:31:24.925713 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fhbph" event={"ID":"79e6b32d-eeda-4bfa-8872-e2581a703aa5","Type":"ContainerStarted","Data":"42071f6d59709970c7f5aba61fef6646a1e38dd741c3139940c8fb1f4927cf67"}
Nov 28 16:31:24 crc kubenswrapper[4909]: I1128 16:31:24.927216 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xhz8l" event={"ID":"970bbe40-62b2-4c32-8f8f-6b36abe92607","Type":"ContainerStarted","Data":"b6015d13bbaa1a4eba93301554665c64577b8404812593db811997392ba52861"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.985247 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fhbph" event={"ID":"79e6b32d-eeda-4bfa-8872-e2581a703aa5","Type":"ContainerStarted","Data":"312e25d9f0fde457f4417d6363e4360b0a69a29525131691497a7915ca3db2ab"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.988004 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lqx45" event={"ID":"7648e31f-ca65-4302-903a-abf1fe2aa860","Type":"ContainerStarted","Data":"4d967a924bb9f3661cbaa046a03c44466b87bb582b5e43bc95fede216a25720e"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.990218 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-937d-account-create-update-bmpjc" event={"ID":"469389c1-482e-4968-b969-163e760e87f2","Type":"ContainerStarted","Data":"ab680f3690017a9ac709a17864c1695750297aac86880cad19fbe1957e9e96cf"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.992437 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlgkn" event={"ID":"b561f9ee-f192-4215-a832-f60fd675206e","Type":"ContainerStarted","Data":"8c0f8e8d2130828381ded896af12e6ddd0f48282a02278e34abc81a2ab38a850"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.993945 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8770-account-create-update-tphgw" event={"ID":"9384b65d-c575-4479-99be-eb182b848bb5","Type":"ContainerStarted","Data":"89da003334cfba5cb7ceaf4db57edbaa578763aa0e7456486f5ae6a90f2ceb86"}
Nov 28 16:31:25 crc kubenswrapper[4909]: I1128 16:31:25.996158 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9799-account-create-update-w49wd" event={"ID":"4740f8ad-ec12-40b8-b3eb-f22149cbd07a","Type":"ContainerStarted","Data":"f6d78d914a06108a8a1936303120febaccb4ca250cbf426cf7dfa81e792a33fc"}
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.011885 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-fhbph" podStartSLOduration=5.011864405 podStartE2EDuration="5.011864405s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.003286855 +0000 UTC m=+1268.399971379" watchObservedRunningTime="2025-11-28 16:31:26.011864405 +0000 UTC m=+1268.408548939"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.027942 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-937d-account-create-update-bmpjc" podStartSLOduration=5.027923167 podStartE2EDuration="5.027923167s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.020762204 +0000 UTC m=+1268.417446748" watchObservedRunningTime="2025-11-28 16:31:26.027923167 +0000 UTC m=+1268.424607691"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.044842 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-lqx45" podStartSLOduration=5.04482125 podStartE2EDuration="5.04482125s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.035638924 +0000 UTC m=+1268.432323458" watchObservedRunningTime="2025-11-28 16:31:26.04482125 +0000 UTC m=+1268.441505774"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.059018 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8770-account-create-update-tphgw" podStartSLOduration=5.058999451 podStartE2EDuration="5.058999451s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.050510893 +0000 UTC m=+1268.447195427" watchObservedRunningTime="2025-11-28 16:31:26.058999451 +0000 UTC m=+1268.455683975"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.066359 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-9799-account-create-update-w49wd" podStartSLOduration=5.066342458 podStartE2EDuration="5.066342458s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.063206494 +0000 UTC m=+1268.459891028" watchObservedRunningTime="2025-11-28 16:31:26.066342458 +0000 UTC m=+1268.463026982"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.084492 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-vlgkn" podStartSLOduration=5.084475555 podStartE2EDuration="5.084475555s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:26.077805356 +0000 UTC m=+1268.474489880" watchObservedRunningTime="2025-11-28 16:31:26.084475555 +0000 UTC m=+1268.481160069"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.534869 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p"
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.588783 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"]
Nov 28 16:31:26 crc kubenswrapper[4909]: I1128 16:31:26.589100 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="dnsmasq-dns" containerID="cri-o://bff5a818f773719ec61196a5ef335b3aa8b2c1fdb3309845fc51ffaa68703ed1" gracePeriod=10
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.007846 4909 generic.go:334] "Generic (PLEG): container finished" podID="79e6b32d-eeda-4bfa-8872-e2581a703aa5" containerID="312e25d9f0fde457f4417d6363e4360b0a69a29525131691497a7915ca3db2ab" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.008238 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fhbph" event={"ID":"79e6b32d-eeda-4bfa-8872-e2581a703aa5","Type":"ContainerDied","Data":"312e25d9f0fde457f4417d6363e4360b0a69a29525131691497a7915ca3db2ab"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.018735 4909 generic.go:334] "Generic (PLEG): container finished" podID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerID="bff5a818f773719ec61196a5ef335b3aa8b2c1fdb3309845fc51ffaa68703ed1" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.018861 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" event={"ID":"524d2697-3bb0-4029-a2d2-1211f9fc8fc5","Type":"ContainerDied","Data":"bff5a818f773719ec61196a5ef335b3aa8b2c1fdb3309845fc51ffaa68703ed1"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.030324 4909 generic.go:334] "Generic (PLEG): container finished" podID="9384b65d-c575-4479-99be-eb182b848bb5" containerID="89da003334cfba5cb7ceaf4db57edbaa578763aa0e7456486f5ae6a90f2ceb86" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.030483 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8770-account-create-update-tphgw" event={"ID":"9384b65d-c575-4479-99be-eb182b848bb5","Type":"ContainerDied","Data":"89da003334cfba5cb7ceaf4db57edbaa578763aa0e7456486f5ae6a90f2ceb86"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.036482 4909 generic.go:334] "Generic (PLEG): container finished" podID="4740f8ad-ec12-40b8-b3eb-f22149cbd07a" containerID="f6d78d914a06108a8a1936303120febaccb4ca250cbf426cf7dfa81e792a33fc" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.036527 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9799-account-create-update-w49wd" event={"ID":"4740f8ad-ec12-40b8-b3eb-f22149cbd07a","Type":"ContainerDied","Data":"f6d78d914a06108a8a1936303120febaccb4ca250cbf426cf7dfa81e792a33fc"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.052323 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.052372 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.059082 4909 generic.go:334] "Generic (PLEG): container finished" podID="7648e31f-ca65-4302-903a-abf1fe2aa860" containerID="4d967a924bb9f3661cbaa046a03c44466b87bb582b5e43bc95fede216a25720e" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.061844 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lqx45" event={"ID":"7648e31f-ca65-4302-903a-abf1fe2aa860","Type":"ContainerDied","Data":"4d967a924bb9f3661cbaa046a03c44466b87bb582b5e43bc95fede216a25720e"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.065696 4909 generic.go:334] "Generic (PLEG): container finished" podID="469389c1-482e-4968-b969-163e760e87f2" containerID="ab680f3690017a9ac709a17864c1695750297aac86880cad19fbe1957e9e96cf" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.065848 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-937d-account-create-update-bmpjc" event={"ID":"469389c1-482e-4968-b969-163e760e87f2","Type":"ContainerDied","Data":"ab680f3690017a9ac709a17864c1695750297aac86880cad19fbe1957e9e96cf"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.067893 4909 generic.go:334] "Generic (PLEG): container finished" podID="b561f9ee-f192-4215-a832-f60fd675206e" containerID="8c0f8e8d2130828381ded896af12e6ddd0f48282a02278e34abc81a2ab38a850" exitCode=0
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.067941 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlgkn" event={"ID":"b561f9ee-f192-4215-a832-f60fd675206e","Type":"ContainerDied","Data":"8c0f8e8d2130828381ded896af12e6ddd0f48282a02278e34abc81a2ab38a850"}
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.130605 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96"
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.315467 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb\") pod \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") "
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.315522 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc\") pod \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") "
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.315600 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6n8h\" (UniqueName: \"kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h\") pod \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") "
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.315804 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb\") pod \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") "
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.315846 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config\") pod \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\" (UID: \"524d2697-3bb0-4029-a2d2-1211f9fc8fc5\") "
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.324429 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h" (OuterVolumeSpecName: "kube-api-access-g6n8h") pod "524d2697-3bb0-4029-a2d2-1211f9fc8fc5" (UID: "524d2697-3bb0-4029-a2d2-1211f9fc8fc5"). InnerVolumeSpecName "kube-api-access-g6n8h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.359282 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "524d2697-3bb0-4029-a2d2-1211f9fc8fc5" (UID: "524d2697-3bb0-4029-a2d2-1211f9fc8fc5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.361407 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config" (OuterVolumeSpecName: "config") pod "524d2697-3bb0-4029-a2d2-1211f9fc8fc5" (UID: "524d2697-3bb0-4029-a2d2-1211f9fc8fc5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.363915 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "524d2697-3bb0-4029-a2d2-1211f9fc8fc5" (UID: "524d2697-3bb0-4029-a2d2-1211f9fc8fc5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.364229 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "524d2697-3bb0-4029-a2d2-1211f9fc8fc5" (UID: "524d2697-3bb0-4029-a2d2-1211f9fc8fc5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.417092 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.417123 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.417133 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.417143 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6n8h\" (UniqueName: \"kubernetes.io/projected/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-kube-api-access-g6n8h\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:27 crc kubenswrapper[4909]: I1128 16:31:27.417152 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/524d2697-3bb0-4029-a2d2-1211f9fc8fc5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.080756 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"}
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.080829 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"}
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.082557 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96"
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.082558 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rvm96" event={"ID":"524d2697-3bb0-4029-a2d2-1211f9fc8fc5","Type":"ContainerDied","Data":"565c6cdaeaf6293fc468cfbfe40deb4bffee19abdd0660464f10d8dfd775d628"}
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.082636 4909 scope.go:117] "RemoveContainer" containerID="bff5a818f773719ec61196a5ef335b3aa8b2c1fdb3309845fc51ffaa68703ed1"
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.113592 4909 scope.go:117] "RemoveContainer" containerID="c5fdc1d67f914e9abb01dcd6e33bd0e89f9d463fd4794c90aeab2d33ad4d7c1d"
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.115376 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"]
Nov 28 16:31:28 crc kubenswrapper[4909]: I1128 16:31:28.126784 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rvm96"]
Nov 28 16:31:29 crc kubenswrapper[4909]: I1128 16:31:29.926573 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" path="/var/lib/kubelet/pods/524d2697-3bb0-4029-a2d2-1211f9fc8fc5/volumes"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.015512 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.051007 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.076005 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.082340 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.093860 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.101565 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts\") pod \"469389c1-482e-4968-b969-163e760e87f2\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.101608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sj4t\" (UniqueName: \"kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t\") pod \"b561f9ee-f192-4215-a832-f60fd675206e\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.101670 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m6wt\" (UniqueName: \"kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt\") pod \"7648e31f-ca65-4302-903a-abf1fe2aa860\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.101734 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts\") pod \"7648e31f-ca65-4302-903a-abf1fe2aa860\" (UID: \"7648e31f-ca65-4302-903a-abf1fe2aa860\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.102846 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.104948 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "469389c1-482e-4968-b969-163e760e87f2" (UID: "469389c1-482e-4968-b969-163e760e87f2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.107332 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7648e31f-ca65-4302-903a-abf1fe2aa860" (UID: "7648e31f-ca65-4302-903a-abf1fe2aa860"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.108503 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t" (OuterVolumeSpecName: "kube-api-access-5sj4t") pod "b561f9ee-f192-4215-a832-f60fd675206e" (UID: "b561f9ee-f192-4215-a832-f60fd675206e"). InnerVolumeSpecName "kube-api-access-5sj4t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.109448 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt" (OuterVolumeSpecName: "kube-api-access-9m6wt") pod "7648e31f-ca65-4302-903a-abf1fe2aa860" (UID: "7648e31f-ca65-4302-903a-abf1fe2aa860"). InnerVolumeSpecName "kube-api-access-9m6wt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.135801 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8770-account-create-update-tphgw" event={"ID":"9384b65d-c575-4479-99be-eb182b848bb5","Type":"ContainerDied","Data":"f164922848dff90cf87137347b41dacad6a4873166ea264e5e1b883cfae804bc"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.135844 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f164922848dff90cf87137347b41dacad6a4873166ea264e5e1b883cfae804bc"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.135901 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8770-account-create-update-tphgw"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.138123 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9799-account-create-update-w49wd" event={"ID":"4740f8ad-ec12-40b8-b3eb-f22149cbd07a","Type":"ContainerDied","Data":"fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.138146 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe5c41814163bfc85bfe647590c0a113fbfcd46d88cfe522b533916616834057"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.138184 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9799-account-create-update-w49wd"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.140062 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fhbph" event={"ID":"79e6b32d-eeda-4bfa-8872-e2581a703aa5","Type":"ContainerDied","Data":"42071f6d59709970c7f5aba61fef6646a1e38dd741c3139940c8fb1f4927cf67"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.140109 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42071f6d59709970c7f5aba61fef6646a1e38dd741c3139940c8fb1f4927cf67"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.140148 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fhbph"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.145950 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xhz8l" event={"ID":"970bbe40-62b2-4c32-8f8f-6b36abe92607","Type":"ContainerStarted","Data":"4b78e9d3d2edc4545741c20fd85514898c5bba454bef6b22b492f5f8a652b138"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.147438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lqx45" event={"ID":"7648e31f-ca65-4302-903a-abf1fe2aa860","Type":"ContainerDied","Data":"924605355daa8ab67ecac293d88b22db1eb3b8cec72a07d4296acbe05fea16d3"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.147475 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="924605355daa8ab67ecac293d88b22db1eb3b8cec72a07d4296acbe05fea16d3"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.147533 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lqx45"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.149318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-937d-account-create-update-bmpjc" event={"ID":"469389c1-482e-4968-b969-163e760e87f2","Type":"ContainerDied","Data":"84e17004bba773c50b178d09c34a694cd3d7b7bf06eab02c65f250c1c73e78d4"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.149348 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84e17004bba773c50b178d09c34a694cd3d7b7bf06eab02c65f250c1c73e78d4"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.149397 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-937d-account-create-update-bmpjc"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.150731 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlgkn" event={"ID":"b561f9ee-f192-4215-a832-f60fd675206e","Type":"ContainerDied","Data":"5bbd03a7dc98d3cfb0c4fa7717a727685303a174c020d3ad73cd58240a227c2c"}
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.150754 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bbd03a7dc98d3cfb0c4fa7717a727685303a174c020d3ad73cd58240a227c2c"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.150786 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlgkn"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.161873 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-xhz8l" podStartSLOduration=4.12109967 podStartE2EDuration="11.161853447s" podCreationTimestamp="2025-11-28 16:31:21 +0000 UTC" firstStartedPulling="2025-11-28 16:31:24.864129861 +0000 UTC m=+1267.260814385" lastFinishedPulling="2025-11-28 16:31:31.904883638 +0000 UTC m=+1274.301568162" observedRunningTime="2025-11-28 16:31:32.161780675 +0000 UTC m=+1274.558465229" watchObservedRunningTime="2025-11-28 16:31:32.161853447 +0000 UTC m=+1274.558537971"
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203290 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts\") pod \"b561f9ee-f192-4215-a832-f60fd675206e\" (UID: \"b561f9ee-f192-4215-a832-f60fd675206e\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203337 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts\") pod \"9384b65d-c575-4479-99be-eb182b848bb5\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203385 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zznk\" (UniqueName: \"kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk\") pod \"9384b65d-c575-4479-99be-eb182b848bb5\" (UID: \"9384b65d-c575-4479-99be-eb182b848bb5\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts\") pod \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9384b65d-c575-4479-99be-eb182b848bb5" (UID: "9384b65d-c575-4479-99be-eb182b848bb5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.203952 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b561f9ee-f192-4215-a832-f60fd675206e" (UID: "b561f9ee-f192-4215-a832-f60fd675206e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204206 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4740f8ad-ec12-40b8-b3eb-f22149cbd07a" (UID: "4740f8ad-ec12-40b8-b3eb-f22149cbd07a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204282 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dghsk\" (UniqueName: \"kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk\") pod \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204328 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r6p6\" (UniqueName: \"kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6\") pod \"469389c1-482e-4968-b969-163e760e87f2\" (UID: \"469389c1-482e-4968-b969-163e760e87f2\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204369 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts\") pod \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\" (UID: \"79e6b32d-eeda-4bfa-8872-e2581a703aa5\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204447 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb2xv\" (UniqueName: \"kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv\") pod \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\" (UID: \"4740f8ad-ec12-40b8-b3eb-f22149cbd07a\") "
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204801 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "79e6b32d-eeda-4bfa-8872-e2581a703aa5" (UID: "79e6b32d-eeda-4bfa-8872-e2581a703aa5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204839 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469389c1-482e-4968-b969-163e760e87f2-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204854 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sj4t\" (UniqueName: \"kubernetes.io/projected/b561f9ee-f192-4215-a832-f60fd675206e-kube-api-access-5sj4t\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204864 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m6wt\" (UniqueName: \"kubernetes.io/projected/7648e31f-ca65-4302-903a-abf1fe2aa860-kube-api-access-9m6wt\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.204970 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7648e31f-ca65-4302-903a-abf1fe2aa860-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.205115 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b561f9ee-f192-4215-a832-f60fd675206e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.205130 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9384b65d-c575-4479-99be-eb182b848bb5-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.205141 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.206853 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk" (OuterVolumeSpecName: "kube-api-access-6zznk") pod "9384b65d-c575-4479-99be-eb182b848bb5" (UID: "9384b65d-c575-4479-99be-eb182b848bb5"). InnerVolumeSpecName "kube-api-access-6zznk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.207501 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6" (OuterVolumeSpecName: "kube-api-access-5r6p6") pod "469389c1-482e-4968-b969-163e760e87f2" (UID: "469389c1-482e-4968-b969-163e760e87f2"). InnerVolumeSpecName "kube-api-access-5r6p6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.207559 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk" (OuterVolumeSpecName: "kube-api-access-dghsk") pod "79e6b32d-eeda-4bfa-8872-e2581a703aa5" (UID: "79e6b32d-eeda-4bfa-8872-e2581a703aa5"). InnerVolumeSpecName "kube-api-access-dghsk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.207802 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv" (OuterVolumeSpecName: "kube-api-access-mb2xv") pod "4740f8ad-ec12-40b8-b3eb-f22149cbd07a" (UID: "4740f8ad-ec12-40b8-b3eb-f22149cbd07a"). InnerVolumeSpecName "kube-api-access-mb2xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.306293 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb2xv\" (UniqueName: \"kubernetes.io/projected/4740f8ad-ec12-40b8-b3eb-f22149cbd07a-kube-api-access-mb2xv\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.306595 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zznk\" (UniqueName: \"kubernetes.io/projected/9384b65d-c575-4479-99be-eb182b848bb5-kube-api-access-6zznk\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.306609 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dghsk\" (UniqueName: \"kubernetes.io/projected/79e6b32d-eeda-4bfa-8872-e2581a703aa5-kube-api-access-dghsk\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.306622 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r6p6\" (UniqueName: \"kubernetes.io/projected/469389c1-482e-4968-b969-163e760e87f2-kube-api-access-5r6p6\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:32 crc kubenswrapper[4909]: I1128 16:31:32.306638 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79e6b32d-eeda-4bfa-8872-e2581a703aa5-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:33 crc kubenswrapper[4909]: I1128 16:31:33.169338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"}
Nov 28 16:31:34 crc kubenswrapper[4909]: I1128 16:31:34.184964 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"}
Nov 28 16:31:34 crc kubenswrapper[4909]: I1128 16:31:34.185298 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"}
Nov 28 16:31:35 crc kubenswrapper[4909]: I1128 16:31:35.198747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"}
Nov 28 16:31:35 crc kubenswrapper[4909]: I1128 16:31:35.199048 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"}
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.215394 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"}
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.216139 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerStarted","Data":"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"}
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.259458 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.126619486 podStartE2EDuration="53.259437427s" podCreationTimestamp="2025-11-28 16:30:43 +0000 UTC" firstStartedPulling="2025-11-28 16:31:17.61285801 +0000 UTC m=+1260.009542534" lastFinishedPulling="2025-11-28 16:31:32.745675951 +0000 UTC m=+1275.142360475" observedRunningTime="2025-11-28 16:31:36.255587744 +0000 UTC m=+1278.652272268" watchObservedRunningTime="2025-11-28 16:31:36.259437427 +0000 UTC m=+1278.656121971"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660414 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"]
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660755 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469389c1-482e-4968-b969-163e760e87f2" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660770 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="469389c1-482e-4968-b969-163e760e87f2" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660792 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="init"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660802 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="init"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660818 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4740f8ad-ec12-40b8-b3eb-f22149cbd07a" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660824 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4740f8ad-ec12-40b8-b3eb-f22149cbd07a" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660844 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9384b65d-c575-4479-99be-eb182b848bb5" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660851 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9384b65d-c575-4479-99be-eb182b848bb5" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660864 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="dnsmasq-dns"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660872 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="dnsmasq-dns"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660880 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79e6b32d-eeda-4bfa-8872-e2581a703aa5" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660886 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="79e6b32d-eeda-4bfa-8872-e2581a703aa5" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660893 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b561f9ee-f192-4215-a832-f60fd675206e" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660899 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b561f9ee-f192-4215-a832-f60fd675206e" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: E1128 16:31:36.660908 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7648e31f-ca65-4302-903a-abf1fe2aa860" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.660914 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7648e31f-ca65-4302-903a-abf1fe2aa860" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661080 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="524d2697-3bb0-4029-a2d2-1211f9fc8fc5" containerName="dnsmasq-dns"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661097 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="469389c1-482e-4968-b969-163e760e87f2" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661111 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9384b65d-c575-4479-99be-eb182b848bb5" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661122 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4740f8ad-ec12-40b8-b3eb-f22149cbd07a" containerName="mariadb-account-create-update"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661138 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="79e6b32d-eeda-4bfa-8872-e2581a703aa5" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661146 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7648e31f-ca65-4302-903a-abf1fe2aa860" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.661162 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b561f9ee-f192-4215-a832-f60fd675206e" containerName="mariadb-database-create"
Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.669949 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.672797 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.674480 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"] Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771327 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771400 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771550 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bxrl\" (UniqueName: \"kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771675 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771713 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.771763 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.872815 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bxrl\" (UniqueName: \"kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.872917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " 
pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.872952 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.873014 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874159 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874226 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874263 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874071 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.874770 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.875140 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.898357 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4bxrl\" (UniqueName: \"kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl\") pod \"dnsmasq-dns-895cf5cf-rkrcn\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") " pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:36 crc kubenswrapper[4909]: I1128 16:31:36.989365 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:38 crc kubenswrapper[4909]: I1128 16:31:37.436634 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"] Nov 28 16:31:38 crc kubenswrapper[4909]: I1128 16:31:38.236920 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerStarted","Data":"f28a310428db4dd736408e290f4e6851feb79f62dc3e638ff1963b2276e26408"} Nov 28 16:31:38 crc kubenswrapper[4909]: I1128 16:31:38.237355 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerStarted","Data":"d4845a28fd2f83252a9eee912c4e852f45f66190f6893b1045162aaf0366e9f6"} Nov 28 16:31:39 crc kubenswrapper[4909]: I1128 16:31:39.249750 4909 generic.go:334] "Generic (PLEG): container finished" podID="12883252-f9ad-417c-a0de-c191f705082d" containerID="f28a310428db4dd736408e290f4e6851feb79f62dc3e638ff1963b2276e26408" exitCode=0 Nov 28 16:31:39 crc kubenswrapper[4909]: I1128 16:31:39.249813 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerDied","Data":"f28a310428db4dd736408e290f4e6851feb79f62dc3e638ff1963b2276e26408"} Nov 28 16:31:40 crc kubenswrapper[4909]: I1128 16:31:40.261868 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerStarted","Data":"e310d7c9491842ef2798420d19a65596295716b1a00ce15380982f754fc33ec4"} Nov 28 16:31:40 crc kubenswrapper[4909]: I1128 16:31:40.262318 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:40 crc kubenswrapper[4909]: I1128 16:31:40.290102 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" podStartSLOduration=4.29008018 podStartE2EDuration="4.29008018s" podCreationTimestamp="2025-11-28 16:31:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:40.286903355 +0000 UTC m=+1282.683587879" watchObservedRunningTime="2025-11-28 16:31:40.29008018 +0000 UTC m=+1282.686764714" Nov 28 16:31:46 crc kubenswrapper[4909]: I1128 16:31:46.340625 4909 generic.go:334] "Generic (PLEG): container finished" podID="970bbe40-62b2-4c32-8f8f-6b36abe92607" containerID="4b78e9d3d2edc4545741c20fd85514898c5bba454bef6b22b492f5f8a652b138" exitCode=0 Nov 28 16:31:46 crc kubenswrapper[4909]: I1128 16:31:46.340747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xhz8l" event={"ID":"970bbe40-62b2-4c32-8f8f-6b36abe92607","Type":"ContainerDied","Data":"4b78e9d3d2edc4545741c20fd85514898c5bba454bef6b22b492f5f8a652b138"} Nov 28 16:31:46 crc kubenswrapper[4909]: I1128 16:31:46.991822 4909 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.053459 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.053833 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="dnsmasq-dns" containerID="cri-o://9ad9179f1c4cb74cc6246d262eaeb8f0011cd0e7b11bdc7780a08e8b6bba596e" gracePeriod=10 Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.697892 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xhz8l" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.874138 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle\") pod \"970bbe40-62b2-4c32-8f8f-6b36abe92607\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.874217 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knpnt\" (UniqueName: \"kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt\") pod \"970bbe40-62b2-4c32-8f8f-6b36abe92607\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.874290 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data\") pod \"970bbe40-62b2-4c32-8f8f-6b36abe92607\" (UID: \"970bbe40-62b2-4c32-8f8f-6b36abe92607\") " Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.880172 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt" (OuterVolumeSpecName: "kube-api-access-knpnt") pod "970bbe40-62b2-4c32-8f8f-6b36abe92607" (UID: "970bbe40-62b2-4c32-8f8f-6b36abe92607"). InnerVolumeSpecName "kube-api-access-knpnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.907141 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "970bbe40-62b2-4c32-8f8f-6b36abe92607" (UID: "970bbe40-62b2-4c32-8f8f-6b36abe92607"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.937431 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data" (OuterVolumeSpecName: "config-data") pod "970bbe40-62b2-4c32-8f8f-6b36abe92607" (UID: "970bbe40-62b2-4c32-8f8f-6b36abe92607"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.976967 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.977250 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knpnt\" (UniqueName: \"kubernetes.io/projected/970bbe40-62b2-4c32-8f8f-6b36abe92607-kube-api-access-knpnt\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:47 crc kubenswrapper[4909]: I1128 16:31:47.977303 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970bbe40-62b2-4c32-8f8f-6b36abe92607-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.357632 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xhz8l" event={"ID":"970bbe40-62b2-4c32-8f8f-6b36abe92607","Type":"ContainerDied","Data":"b6015d13bbaa1a4eba93301554665c64577b8404812593db811997392ba52861"} Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.357966 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6015d13bbaa1a4eba93301554665c64577b8404812593db811997392ba52861" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.357650 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xhz8l" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.360332 4909 generic.go:334] "Generic (PLEG): container finished" podID="886ced6c-0930-40c8-903f-16146cd8994e" containerID="9ad9179f1c4cb74cc6246d262eaeb8f0011cd0e7b11bdc7780a08e8b6bba596e" exitCode=0 Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.360361 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" event={"ID":"886ced6c-0930-40c8-903f-16146cd8994e","Type":"ContainerDied","Data":"9ad9179f1c4cb74cc6246d262eaeb8f0011cd0e7b11bdc7780a08e8b6bba596e"} Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.642695 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:48 crc kubenswrapper[4909]: E1128 16:31:48.644059 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="970bbe40-62b2-4c32-8f8f-6b36abe92607" containerName="keystone-db-sync" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.644074 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="970bbe40-62b2-4c32-8f8f-6b36abe92607" containerName="keystone-db-sync" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.644244 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="970bbe40-62b2-4c32-8f8f-6b36abe92607" containerName="keystone-db-sync" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.645392 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.664428 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.669441 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-598lt"] Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.670636 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.711379 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.711632 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.711774 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fmn76" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.713708 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.719047 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.743519 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-598lt"] Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793520 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793671 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793713 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793815 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793861 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793915 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793937 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j97fg\" (UniqueName: \"kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.793965 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.794022 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.794133 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.794171 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.794196 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stghz\" (UniqueName: \"kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.888375 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-fcstr"] Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.890335 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898290 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898808 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898874 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898919 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898951 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j97fg\" (UniqueName: \"kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.898978 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899018 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899077 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899106 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899137 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stghz\" (UniqueName: \"kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz\") pod 
\"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899176 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899229 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.899260 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.900149 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.901330 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.904158 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vwqt8" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.904236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.909468 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.910084 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.910824 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 
16:31:48.912089 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.945611 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j97fg\" (UniqueName: \"kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.947651 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.950908 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.958055 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.958708 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys\") pod \"keystone-bootstrap-598lt\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") " pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.976301 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stghz\" (UniqueName: \"kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz\") pod \"dnsmasq-dns-6c9c9f998c-gdtbt\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:48 crc kubenswrapper[4909]: I1128 16:31:48.982589 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fcstr"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.001712 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27sb9\" (UniqueName: \"kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.001825 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc 
kubenswrapper[4909]: I1128 16:31:49.001889 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.020330 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.023084 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.034714 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.040323 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.050200 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.056187 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-598lt" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.067323 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.082733 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-cj8sl"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.083800 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.100023 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.100241 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-59vjz" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.100369 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103620 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103673 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103714 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27sb9\" (UniqueName: \"kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103729 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103786 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt7dc\" (UniqueName: \"kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103811 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103832 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103852 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 
16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103884 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.103904 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.129388 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cj8sl"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.137517 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.153422 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.168334 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-6wh46"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.170473 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.177827 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6wh46"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.180999 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.181763 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cjq6w" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.199093 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27sb9\" (UniqueName: \"kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9\") pod \"neutron-db-sync-fcstr\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.205907 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.206987 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207032 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207062 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207089 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207127 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207157 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt7dc\" (UniqueName: \"kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207187 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrxm7\" (UniqueName: 
\"kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207216 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207246 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207299 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207358 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207386 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.207433 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.212577 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.213667 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.238475 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.241198 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.246884 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.253564 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.304715 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ngll9"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.306474 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308348 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308387 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308421 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjpgz\" (UniqueName: \"kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308440 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308473 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrxm7\" (UniqueName: \"kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308497 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle\") pod \"barbican-db-sync-6wh46\" (UID: 
\"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308529 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308560 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.308592 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.311003 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ngll9"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.314556 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.314755 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.314864 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-fft54" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.315154 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt7dc\" (UniqueName: \"kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc\") pod \"ceilometer-0\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.315423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.320503 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.321491 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.323555 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.331047 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.331414 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.332273 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.355882 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.363821 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fcstr" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.396533 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrxm7\" (UniqueName: \"kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7\") pod \"cinder-db-sync-cj8sl\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") " pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428298 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428363 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428391 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428441 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zl8l\" (UniqueName: \"kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428467 4909 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428485 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428510 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428541 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428565 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428597 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428619 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjpgz\" (UniqueName: \"kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428668 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6477\" (UniqueName: \"kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.428700 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 
16:31:49.428737 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.439398 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.454536 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.460443 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjpgz\" (UniqueName: \"kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz\") pod \"barbican-db-sync-6wh46\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.504559 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535243 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cj8sl" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535925 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535946 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535981 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zl8l\" (UniqueName: \"kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.535999 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536014 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536033 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536057 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536074 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536102 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6477\" (UniqueName: \"kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.536121 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.537460 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.538051 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.538106 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " 
pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.538262 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.538995 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.539996 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.543263 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.544226 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.546825 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.552544 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6wh46" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.556566 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6477\" (UniqueName: \"kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477\") pod \"placement-db-sync-ngll9\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.562327 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zl8l\" (UniqueName: \"kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l\") pod \"dnsmasq-dns-57c957c4ff-zlrjj\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.611846 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ngll9" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.627450 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.892501 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.894375 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.898177 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.898519 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.899418 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fzds2" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.900952 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.934971 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.935016 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.943706 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.950030 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.950270 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 16:31:49 crc kubenswrapper[4909]: I1128 16:31:49.952740 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.009950 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:50 crc kubenswrapper[4909]: W1128 16:31:50.014267 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8610eac9_5723_4cbd_95b0_cd81f99369d5.slice/crio-37594af4e907c10f27354fed394f0756fb9b37445b1b4977986f9b3437052e4f WatchSource:0}: Error finding container 37594af4e907c10f27354fed394f0756fb9b37445b1b4977986f9b3437052e4f: Status 404 returned error can't find the container with id 37594af4e907c10f27354fed394f0756fb9b37445b1b4977986f9b3437052e4f Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.087075 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvsh5\" (UniqueName: \"kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.087511 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle\") pod 
\"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.087543 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.087569 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.087679 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090146 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090298 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090341 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090407 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090489 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090583 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090628 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090675 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx4fs\" (UniqueName: \"kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090774 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.090797 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.102255 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192499 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wkp2\" (UniqueName: \"kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2\") pod \"886ced6c-0930-40c8-903f-16146cd8994e\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192602 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config\") pod \"886ced6c-0930-40c8-903f-16146cd8994e\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192621 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc\") pod \"886ced6c-0930-40c8-903f-16146cd8994e\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192716 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb\") pod \"886ced6c-0930-40c8-903f-16146cd8994e\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192751 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb\") pod \"886ced6c-0930-40c8-903f-16146cd8994e\" (UID: \"886ced6c-0930-40c8-903f-16146cd8994e\") " Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192933 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192962 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.192986 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193006 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193046 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193078 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193111 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193147 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193164 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193179 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193194 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx4fs\" (UniqueName: \"kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193225 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193244 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193265 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvsh5\" (UniqueName: \"kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.193285 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.195098 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.197076 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.198604 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.199341 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.199943 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2" (OuterVolumeSpecName: "kube-api-access-4wkp2") pod "886ced6c-0930-40c8-903f-16146cd8994e" (UID: "886ced6c-0930-40c8-903f-16146cd8994e"). InnerVolumeSpecName "kube-api-access-4wkp2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.200800 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.201097 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.214668 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.222426 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.230898 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.230909 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.234393 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.238071 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.244449 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx4fs\" (UniqueName: \"kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.249078 
4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.251412 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.259520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.268029 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fcstr"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.273000 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvsh5\" (UniqueName: \"kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5\") pod \"glance-default-internal-api-0\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.277256 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.289799 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-598lt"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.297875 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wkp2\" (UniqueName: \"kubernetes.io/projected/886ced6c-0930-40c8-903f-16146cd8994e-kube-api-access-4wkp2\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:50 crc kubenswrapper[4909]: W1128 16:31:50.300555 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29c42f34_c4bc_433c_b0d7_a0a8acf595db.slice/crio-e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8 WatchSource:0}: Error finding container e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8: Status 404 returned error can't find the container with id e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8 Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.309334 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "886ced6c-0930-40c8-903f-16146cd8994e" (UID: "886ced6c-0930-40c8-903f-16146cd8994e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.313200 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.326180 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "886ced6c-0930-40c8-903f-16146cd8994e" (UID: "886ced6c-0930-40c8-903f-16146cd8994e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.326285 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config" (OuterVolumeSpecName: "config") pod "886ced6c-0930-40c8-903f-16146cd8994e" (UID: "886ced6c-0930-40c8-903f-16146cd8994e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.346319 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "886ced6c-0930-40c8-903f-16146cd8994e" (UID: "886ced6c-0930-40c8-903f-16146cd8994e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.399846 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.400206 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.400218 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.400231 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/886ced6c-0930-40c8-903f-16146cd8994e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.417707 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" event={"ID":"8610eac9-5723-4cbd-95b0-cd81f99369d5","Type":"ContainerStarted","Data":"37594af4e907c10f27354fed394f0756fb9b37445b1b4977986f9b3437052e4f"} Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.427943 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fcstr" event={"ID":"29c42f34-c4bc-433c-b0d7-a0a8acf595db","Type":"ContainerStarted","Data":"e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8"} Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.429965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-598lt" 
event={"ID":"77700d2a-0ae0-421c-9ee0-63aaa32a5428","Type":"ContainerStarted","Data":"14c8c7ac2f7e0569ff7e6c6a92823061844e003b076b636a5def4da91fcd19dd"} Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.434038 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cj8sl"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.444965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" event={"ID":"886ced6c-0930-40c8-903f-16146cd8994e","Type":"ContainerDied","Data":"67430a65bc0e8c8ff0643e4dc80c56469446a2cdd2b6de4ce93ab1f9af1abd67"} Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.445027 4909 scope.go:117] "RemoveContainer" containerID="9ad9179f1c4cb74cc6246d262eaeb8f0011cd0e7b11bdc7780a08e8b6bba596e" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.445547 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dc88fc-cvh5p" Nov 28 16:31:50 crc kubenswrapper[4909]: W1128 16:31:50.457958 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod148d191b_98d8_4e26_a335_1bfb373f4f07.slice/crio-8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b WatchSource:0}: Error finding container 8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b: Status 404 returned error can't find the container with id 8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.521737 4909 scope.go:117] "RemoveContainer" containerID="dbdadc1bcc34558d89af786f93a444efc87620a0c8ac8d55f5db6d1716d267fe" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.526806 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6wh46"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.534856 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ngll9"] Nov 28 16:31:50 crc kubenswrapper[4909]: W1128 16:31:50.550186 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfbc4977f_f846_4428_9fb6_558811c3e65b.slice/crio-f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324 WatchSource:0}: Error finding container f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324: Status 404 returned error can't find the container with id f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324 Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.552261 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.559822 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.566537 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74dc88fc-cvh5p"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.573781 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:31:50 crc kubenswrapper[4909]: I1128 16:31:50.580950 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:31:51 crc kubenswrapper[4909]: W1128 16:31:51.440804 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod537db953_469c_4d69_a3ed_9be676f44be7.slice/crio-7cbb4835a8136d01bc62414c676a0c964ca1e4616fe886a2f38fa2953a446bb9 WatchSource:0}: Error finding container 7cbb4835a8136d01bc62414c676a0c964ca1e4616fe886a2f38fa2953a446bb9: Status 404 returned error can't find the container with id 7cbb4835a8136d01bc62414c676a0c964ca1e4616fe886a2f38fa2953a446bb9 Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.441475 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.452780 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" event={"ID":"e525be76-a5cf-472c-91b6-c602362d9877","Type":"ContainerStarted","Data":"3038956ff3574df85c351690e9740a8f4541bd8fb4d36295f5c8c3b1cdf62705"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.455372 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ngll9" event={"ID":"2d62f293-d022-4d92-915b-f83c3fa157a7","Type":"ContainerStarted","Data":"8a522f616b775e066da064a1560049a1a668c2c6c27976870cce096384d03627"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.456371 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerStarted","Data":"7cbb4835a8136d01bc62414c676a0c964ca1e4616fe886a2f38fa2953a446bb9"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.457509 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerStarted","Data":"7b59dfb079d7a33b4b7baf4f3829af44a1b4d5b0e1f531562c57d9be26f51565"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.461300 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6wh46" event={"ID":"fbc4977f-f846-4428-9fb6-558811c3e65b","Type":"ContainerStarted","Data":"f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.462326 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cj8sl" event={"ID":"148d191b-98d8-4e26-a335-1bfb373f4f07","Type":"ContainerStarted","Data":"8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b"} Nov 28 16:31:51 crc kubenswrapper[4909]: I1128 16:31:51.922302 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="886ced6c-0930-40c8-903f-16146cd8994e" path="/var/lib/kubelet/pods/886ced6c-0930-40c8-903f-16146cd8994e/volumes" Nov 28 16:31:51 crc 
kubenswrapper[4909]: I1128 16:31:51.980208 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.019028 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.056479 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.495252 4909 generic.go:334] "Generic (PLEG): container finished" podID="e525be76-a5cf-472c-91b6-c602362d9877" containerID="afc7960de6d7d1c58beb323f82e507631698c08c9bf3d0502de2ef29f40a95c7" exitCode=0 Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.495761 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" event={"ID":"e525be76-a5cf-472c-91b6-c602362d9877","Type":"ContainerDied","Data":"afc7960de6d7d1c58beb323f82e507631698c08c9bf3d0502de2ef29f40a95c7"} Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.499077 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-598lt" event={"ID":"77700d2a-0ae0-421c-9ee0-63aaa32a5428","Type":"ContainerStarted","Data":"ada4fc1a9bf251c75fd0b917cc3f6fe1006748ce560d0b4a79f1f22185d7e0ee"} Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.503244 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerStarted","Data":"004ed61ff10b2b3785414259b79868173515f918c042c9b45b77485d8d26cd96"} Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.506823 4909 generic.go:334] "Generic (PLEG): container finished" podID="8610eac9-5723-4cbd-95b0-cd81f99369d5" containerID="62ed359571c97f194dd5b2d85a311167e613dd48a4c6843738b62dbc026c6382" exitCode=0 Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.506877 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" event={"ID":"8610eac9-5723-4cbd-95b0-cd81f99369d5","Type":"ContainerDied","Data":"62ed359571c97f194dd5b2d85a311167e613dd48a4c6843738b62dbc026c6382"} Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.515152 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.515445 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fcstr" event={"ID":"29c42f34-c4bc-433c-b0d7-a0a8acf595db","Type":"ContainerStarted","Data":"77eec265c191d92518e9615ece2181462c2321d96a38aae2b1a5320db748c0bb"} Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.566032 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-598lt" podStartSLOduration=4.566006109 podStartE2EDuration="4.566006109s" podCreationTimestamp="2025-11-28 16:31:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:52.559220787 +0000 UTC m=+1294.955905321" watchObservedRunningTime="2025-11-28 16:31:52.566006109 +0000 UTC m=+1294.962690633" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.596673 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-fcstr" podStartSLOduration=4.596629081 podStartE2EDuration="4.596629081s" podCreationTimestamp="2025-11-28 16:31:48 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:52.585761209 +0000 UTC m=+1294.982445743" watchObservedRunningTime="2025-11-28 16:31:52.596629081 +0000 UTC m=+1294.993313605" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.818360 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944234 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944308 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stghz\" (UniqueName: \"kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944360 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944438 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944493 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.944532 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0\") pod \"8610eac9-5723-4cbd-95b0-cd81f99369d5\" (UID: \"8610eac9-5723-4cbd-95b0-cd81f99369d5\") " Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.962242 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz" (OuterVolumeSpecName: "kube-api-access-stghz") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "kube-api-access-stghz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.968472 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.969049 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.972748 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.974041 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config" (OuterVolumeSpecName: "config") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4909]: I1128 16:31:52.984928 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8610eac9-5723-4cbd-95b0-cd81f99369d5" (UID: "8610eac9-5723-4cbd-95b0-cd81f99369d5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047014 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047046 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047058 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047067 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stghz\" (UniqueName: \"kubernetes.io/projected/8610eac9-5723-4cbd-95b0-cd81f99369d5-kube-api-access-stghz\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047075 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.047084 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8610eac9-5723-4cbd-95b0-cd81f99369d5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.531105 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" 
event={"ID":"8610eac9-5723-4cbd-95b0-cd81f99369d5","Type":"ContainerDied","Data":"37594af4e907c10f27354fed394f0756fb9b37445b1b4977986f9b3437052e4f"} Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.531197 4909 scope.go:117] "RemoveContainer" containerID="62ed359571c97f194dd5b2d85a311167e613dd48a4c6843738b62dbc026c6382" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.531337 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-gdtbt" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.541581 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" event={"ID":"e525be76-a5cf-472c-91b6-c602362d9877","Type":"ContainerStarted","Data":"8be88a5c79574dd45b097de4f349fe3c1ef09fd8b0795929171fcf223c404f1f"} Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.542725 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.546735 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerStarted","Data":"82f6e8b6d2681eb62ac08ca31f588c062a645ae62aea964cd88558d182d709a1"} Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.547032 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-log" containerID="cri-o://004ed61ff10b2b3785414259b79868173515f918c042c9b45b77485d8d26cd96" gracePeriod=30 Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.547279 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-httpd" containerID="cri-o://82f6e8b6d2681eb62ac08ca31f588c062a645ae62aea964cd88558d182d709a1" gracePeriod=30 Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.559599 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerStarted","Data":"6c260636beb5317942af42fdece31e900a2dac87694bf2faba3da9a8668eafcc"} Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.559642 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerStarted","Data":"b5161f002c27095df9c18248c99d1f9574448818e968d0415ac0bf574fb12bcc"} Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.562295 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" podStartSLOduration=4.562278656 podStartE2EDuration="4.562278656s" podCreationTimestamp="2025-11-28 16:31:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:53.559251474 +0000 UTC m=+1295.955935999" watchObservedRunningTime="2025-11-28 16:31:53.562278656 +0000 UTC m=+1295.958963180" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.599807 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.599790363 podStartE2EDuration="5.599790363s" podCreationTimestamp="2025-11-28 16:31:48 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:53.586062584 +0000 UTC m=+1295.982747108" watchObservedRunningTime="2025-11-28 16:31:53.599790363 +0000 UTC m=+1295.996474887" Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.766096 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.774863 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-gdtbt"] Nov 28 16:31:53 crc kubenswrapper[4909]: I1128 16:31:53.924617 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8610eac9-5723-4cbd-95b0-cd81f99369d5" path="/var/lib/kubelet/pods/8610eac9-5723-4cbd-95b0-cd81f99369d5/volumes" Nov 28 16:31:54 crc kubenswrapper[4909]: I1128 16:31:54.581853 4909 generic.go:334] "Generic (PLEG): container finished" podID="537db953-469c-4d69-a3ed-9be676f44be7" containerID="82f6e8b6d2681eb62ac08ca31f588c062a645ae62aea964cd88558d182d709a1" exitCode=143 Nov 28 16:31:54 crc kubenswrapper[4909]: I1128 16:31:54.581895 4909 generic.go:334] "Generic (PLEG): container finished" podID="537db953-469c-4d69-a3ed-9be676f44be7" containerID="004ed61ff10b2b3785414259b79868173515f918c042c9b45b77485d8d26cd96" exitCode=143 Nov 28 16:31:54 crc kubenswrapper[4909]: I1128 16:31:54.581972 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerDied","Data":"82f6e8b6d2681eb62ac08ca31f588c062a645ae62aea964cd88558d182d709a1"} Nov 28 16:31:54 crc kubenswrapper[4909]: I1128 16:31:54.582012 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerDied","Data":"004ed61ff10b2b3785414259b79868173515f918c042c9b45b77485d8d26cd96"} Nov 28 16:31:56 crc kubenswrapper[4909]: I1128 16:31:56.607046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerStarted","Data":"0a3777e195c1565876e6ec5673a3e4c7d9d52965f5471f40e68c68ad5b0b3810"} Nov 28 16:31:56 crc kubenswrapper[4909]: I1128 16:31:56.607341 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-log" containerID="cri-o://6c260636beb5317942af42fdece31e900a2dac87694bf2faba3da9a8668eafcc" gracePeriod=30 Nov 28 16:31:56 crc kubenswrapper[4909]: I1128 16:31:56.607838 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-httpd" containerID="cri-o://0a3777e195c1565876e6ec5673a3e4c7d9d52965f5471f40e68c68ad5b0b3810" gracePeriod=30 Nov 28 16:31:56 crc kubenswrapper[4909]: I1128 16:31:56.641188 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.641167577000001 podStartE2EDuration="8.641167577s" podCreationTimestamp="2025-11-28 16:31:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:56.640986112 +0000 UTC m=+1299.037670636" watchObservedRunningTime="2025-11-28 
16:31:56.641167577 +0000 UTC m=+1299.037852101" Nov 28 16:31:58 crc kubenswrapper[4909]: I1128 16:31:58.629920 4909 generic.go:334] "Generic (PLEG): container finished" podID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerID="6c260636beb5317942af42fdece31e900a2dac87694bf2faba3da9a8668eafcc" exitCode=143 Nov 28 16:31:58 crc kubenswrapper[4909]: I1128 16:31:58.630056 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerDied","Data":"6c260636beb5317942af42fdece31e900a2dac87694bf2faba3da9a8668eafcc"} Nov 28 16:31:59 crc kubenswrapper[4909]: I1128 16:31:59.629880 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:31:59 crc kubenswrapper[4909]: I1128 16:31:59.700990 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"] Nov 28 16:31:59 crc kubenswrapper[4909]: I1128 16:31:59.701254 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns" containerID="cri-o://e310d7c9491842ef2798420d19a65596295716b1a00ce15380982f754fc33ec4" gracePeriod=10 Nov 28 16:32:01 crc kubenswrapper[4909]: I1128 16:32:01.655391 4909 generic.go:334] "Generic (PLEG): container finished" podID="12883252-f9ad-417c-a0de-c191f705082d" containerID="e310d7c9491842ef2798420d19a65596295716b1a00ce15380982f754fc33ec4" exitCode=0 Nov 28 16:32:01 crc kubenswrapper[4909]: I1128 16:32:01.655468 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerDied","Data":"e310d7c9491842ef2798420d19a65596295716b1a00ce15380982f754fc33ec4"} Nov 28 16:32:01 crc kubenswrapper[4909]: I1128 16:32:01.658510 4909 generic.go:334] "Generic (PLEG): container finished" podID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerID="0a3777e195c1565876e6ec5673a3e4c7d9d52965f5471f40e68c68ad5b0b3810" exitCode=0 Nov 28 16:32:01 crc kubenswrapper[4909]: I1128 16:32:01.658536 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerDied","Data":"0a3777e195c1565876e6ec5673a3e4c7d9d52965f5471f40e68c68ad5b0b3810"} Nov 28 16:32:01 crc kubenswrapper[4909]: I1128 16:32:01.990315 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.133:5353: connect: connection refused" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.050384 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156550 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156634 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156702 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156755 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156822 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156868 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvsh5\" (UniqueName: \"kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156889 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.156974 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs\") pod \"537db953-469c-4d69-a3ed-9be676f44be7\" (UID: \"537db953-469c-4d69-a3ed-9be676f44be7\") " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.157603 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.158065 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.158091 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs" (OuterVolumeSpecName: "logs") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.166609 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5" (OuterVolumeSpecName: "kube-api-access-vvsh5") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "kube-api-access-vvsh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.175879 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts" (OuterVolumeSpecName: "scripts") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.179787 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.183064 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.231458 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data" (OuterVolumeSpecName: "config-data") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.236563 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "537db953-469c-4d69-a3ed-9be676f44be7" (UID: "537db953-469c-4d69-a3ed-9be676f44be7"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259385 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259453 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259465 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259476 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/537db953-469c-4d69-a3ed-9be676f44be7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259487 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259500 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvsh5\" (UniqueName: \"kubernetes.io/projected/537db953-469c-4d69-a3ed-9be676f44be7-kube-api-access-vvsh5\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.259514 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/537db953-469c-4d69-a3ed-9be676f44be7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.280786 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.361091 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.676401 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"537db953-469c-4d69-a3ed-9be676f44be7","Type":"ContainerDied","Data":"7cbb4835a8136d01bc62414c676a0c964ca1e4616fe886a2f38fa2953a446bb9"} Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.676451 4909 scope.go:117] "RemoveContainer" containerID="82f6e8b6d2681eb62ac08ca31f588c062a645ae62aea964cd88558d182d709a1" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.676556 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.716284 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.718704 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.743129 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:03 crc kubenswrapper[4909]: E1128 16:32:03.744193 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="dnsmasq-dns" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744210 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="dnsmasq-dns" Nov 28 16:32:03 crc kubenswrapper[4909]: E1128 16:32:03.744228 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8610eac9-5723-4cbd-95b0-cd81f99369d5" containerName="init" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744234 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8610eac9-5723-4cbd-95b0-cd81f99369d5" containerName="init" Nov 28 16:32:03 crc kubenswrapper[4909]: E1128 16:32:03.744245 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-log" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744251 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-log" Nov 28 16:32:03 crc kubenswrapper[4909]: E1128 16:32:03.744261 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="init" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744267 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="init" Nov 28 16:32:03 crc kubenswrapper[4909]: E1128 16:32:03.744288 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-httpd" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744294 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-httpd" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744478 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8610eac9-5723-4cbd-95b0-cd81f99369d5" containerName="init" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744496 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="886ced6c-0930-40c8-903f-16146cd8994e" containerName="dnsmasq-dns" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744509 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-httpd" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.744518 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="537db953-469c-4d69-a3ed-9be676f44be7" containerName="glance-log" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.747536 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.751278 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.751520 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.756677 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875030 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875074 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875134 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875165 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb5vt\" (UniqueName: \"kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875180 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875197 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.875267 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.914447 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="537db953-469c-4d69-a3ed-9be676f44be7" path="/var/lib/kubelet/pods/537db953-469c-4d69-a3ed-9be676f44be7/volumes" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977050 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb5vt\" (UniqueName: \"kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977102 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977133 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977179 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977219 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977310 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977335 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977390 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977466 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.977768 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.978011 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.982511 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.982852 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.983128 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:03 crc kubenswrapper[4909]: I1128 16:32:03.983260 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:04 crc kubenswrapper[4909]: I1128 16:32:04.000027 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb5vt\" (UniqueName: \"kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:04 crc kubenswrapper[4909]: I1128 16:32:04.011373 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:04 crc kubenswrapper[4909]: I1128 
16:32:04.081613 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.314428 4909 scope.go:117] "RemoveContainer" containerID="004ed61ff10b2b3785414259b79868173515f918c042c9b45b77485d8d26cd96" Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.318770 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.318967 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w6477,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-ngll9_openstack(2d62f293-d022-4d92-915b-f83c3fa157a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.320112 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-ngll9" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.406269 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.504829 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.504890 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.504925 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.504952 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.504988 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx4fs\" (UniqueName: \"kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.505027 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.505083 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.505128 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run\") pod \"a35f2df9-5b1e-488d-99e7-9e541b997330\" (UID: \"a35f2df9-5b1e-488d-99e7-9e541b997330\") " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.505831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.506164 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs" (OuterVolumeSpecName: "logs") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.509837 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs" (OuterVolumeSpecName: "kube-api-access-lx4fs") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "kube-api-access-lx4fs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.509871 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts" (OuterVolumeSpecName: "scripts") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.510774 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.532149 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.550945 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data" (OuterVolumeSpecName: "config-data") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.554008 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a35f2df9-5b1e-488d-99e7-9e541b997330" (UID: "a35f2df9-5b1e-488d-99e7-9e541b997330"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607114 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607150 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607160 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607169 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607178 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35f2df9-5b1e-488d-99e7-9e541b997330-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607187 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35f2df9-5b1e-488d-99e7-9e541b997330-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607225 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.607266 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx4fs\" (UniqueName: \"kubernetes.io/projected/a35f2df9-5b1e-488d-99e7-9e541b997330-kube-api-access-lx4fs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.626292 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.700529 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a35f2df9-5b1e-488d-99e7-9e541b997330","Type":"ContainerDied","Data":"b5161f002c27095df9c18248c99d1f9574448818e968d0415ac0bf574fb12bcc"} Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.700547 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.704113 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-ngll9" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.710100 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.776721 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.786642 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.800475 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.800841 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-httpd" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.800857 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-httpd" Nov 28 16:32:05 crc kubenswrapper[4909]: E1128 16:32:05.800892 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-log" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.800898 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-log" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.813127 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-log" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.813191 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" containerName="glance-httpd" Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.814394 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.817015 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.818479 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.822010 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.913941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.913989 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914042 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914064 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914094 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914117 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nqz\" (UniqueName: \"kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914173 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.914355 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:05 crc kubenswrapper[4909]: I1128 16:32:05.915731 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a35f2df9-5b1e-488d-99e7-9e541b997330" path="/var/lib/kubelet/pods/a35f2df9-5b1e-488d-99e7-9e541b997330/volumes"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016166 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016233 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016260 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016300 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016316 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016342 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nqz\" (UniqueName: \"kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016363 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.016436 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.017387 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.017507 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.018998 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.021556 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.021638 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.022672 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.023776 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.034713 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nqz\" (UniqueName: \"kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.044783 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.144235 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.712805 4909 generic.go:334] "Generic (PLEG): container finished" podID="77700d2a-0ae0-421c-9ee0-63aaa32a5428" containerID="ada4fc1a9bf251c75fd0b917cc3f6fe1006748ce560d0b4a79f1f22185d7e0ee" exitCode=0
Nov 28 16:32:06 crc kubenswrapper[4909]: I1128 16:32:06.712895 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-598lt" event={"ID":"77700d2a-0ae0-421c-9ee0-63aaa32a5428","Type":"ContainerDied","Data":"ada4fc1a9bf251c75fd0b917cc3f6fe1006748ce560d0b4a79f1f22185d7e0ee"}
Nov 28 16:32:11 crc kubenswrapper[4909]: I1128 16:32:11.990054 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.133:5353: i/o timeout"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.685670 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.689088 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-598lt"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.795334 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-598lt" event={"ID":"77700d2a-0ae0-421c-9ee0-63aaa32a5428","Type":"ContainerDied","Data":"14c8c7ac2f7e0569ff7e6c6a92823061844e003b076b636a5def4da91fcd19dd"}
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.795416 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14c8c7ac2f7e0569ff7e6c6a92823061844e003b076b636a5def4da91fcd19dd"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.795464 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-598lt"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.797511 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" event={"ID":"12883252-f9ad-417c-a0de-c191f705082d","Type":"ContainerDied","Data":"d4845a28fd2f83252a9eee912c4e852f45f66190f6893b1045162aaf0366e9f6"}
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.797536 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816035 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816086 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816157 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816242 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816276 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816298 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816322 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bxrl\" (UniqueName: \"kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816368 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816385 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816411 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816430 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j97fg\" (UniqueName: \"kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg\") pod \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\" (UID: \"77700d2a-0ae0-421c-9ee0-63aaa32a5428\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.816503 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config\") pod \"12883252-f9ad-417c-a0de-c191f705082d\" (UID: \"12883252-f9ad-417c-a0de-c191f705082d\") "
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.823406 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg" (OuterVolumeSpecName: "kube-api-access-j97fg") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "kube-api-access-j97fg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.823545 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.823746 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts" (OuterVolumeSpecName: "scripts") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.829856 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.834309 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl" (OuterVolumeSpecName: "kube-api-access-4bxrl") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "kube-api-access-4bxrl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.849853 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.854244 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data" (OuterVolumeSpecName: "config-data") pod "77700d2a-0ae0-421c-9ee0-63aaa32a5428" (UID: "77700d2a-0ae0-421c-9ee0-63aaa32a5428"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.865042 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.871774 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.872068 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.874230 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.879971 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config" (OuterVolumeSpecName: "config") pod "12883252-f9ad-417c-a0de-c191f705082d" (UID: "12883252-f9ad-417c-a0de-c191f705082d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918461 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918513 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918522 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918533 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918542 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bxrl\" (UniqueName: \"kubernetes.io/projected/12883252-f9ad-417c-a0de-c191f705082d-kube-api-access-4bxrl\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918552 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918561 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918569 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918577 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j97fg\" (UniqueName: \"kubernetes.io/projected/77700d2a-0ae0-421c-9ee0-63aaa32a5428-kube-api-access-j97fg\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918585 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918596 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12883252-f9ad-417c-a0de-c191f705082d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.918606 4909 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77700d2a-0ae0-421c-9ee0-63aaa32a5428-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.991703 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.133:5353: i/o timeout"
Nov 28 16:32:16 crc kubenswrapper[4909]: I1128 16:32:16.991796 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-rkrcn"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.136932 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"]
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.143935 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-rkrcn"]
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.495268 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified"
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.495716 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjpgz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-6wh46_openstack(fbc4977f-f846-4428-9fb6-558811c3e65b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.496810 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-6wh46" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.792089 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-598lt"]
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.797847 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-598lt"]
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.806145 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-6wh46" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.892691 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6w86s"]
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.893165 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77700d2a-0ae0-421c-9ee0-63aaa32a5428" containerName="keystone-bootstrap"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.893184 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="77700d2a-0ae0-421c-9ee0-63aaa32a5428" containerName="keystone-bootstrap"
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.893201 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.893208 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns"
Nov 28 16:32:17 crc kubenswrapper[4909]: E1128 16:32:17.893228 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="init"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.893234 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="init"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.893623 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="77700d2a-0ae0-421c-9ee0-63aaa32a5428" containerName="keystone-bootstrap"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.893703 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="12883252-f9ad-417c-a0de-c191f705082d" containerName="dnsmasq-dns"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.894369 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.896227 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.896414 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.897089 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.897261 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.897370 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fmn76"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.917391 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12883252-f9ad-417c-a0de-c191f705082d" path="/var/lib/kubelet/pods/12883252-f9ad-417c-a0de-c191f705082d/volumes"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.918021 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77700d2a-0ae0-421c-9ee0-63aaa32a5428" path="/var/lib/kubelet/pods/77700d2a-0ae0-421c-9ee0-63aaa32a5428/volumes"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.918581 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6w86s"]
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.946584 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.946677 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.946782 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm5n8\" (UniqueName: \"kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.946868 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.947075 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:17 crc kubenswrapper[4909]: I1128 16:32:17.947115 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049371 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049445 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049510 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm5n8\" (UniqueName: \"kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049533 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049612 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.049649 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.054463 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.056076 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.056142 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.056171 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.059931 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.066844 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qm5n8\" (UniqueName: \"kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8\") pod \"keystone-bootstrap-6w86s\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") " pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.225503 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:18 crc kubenswrapper[4909]: I1128 16:32:18.923587 4909 scope.go:117] "RemoveContainer" containerID="0a3777e195c1565876e6ec5673a3e4c7d9d52965f5471f40e68c68ad5b0b3810"
Nov 28 16:32:19 crc kubenswrapper[4909]: E1128 16:32:19.024705 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Nov 28 16:32:19 crc kubenswrapper[4909]: E1128 16:32:19.025333 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jrxm7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-cj8sl_openstack(148d191b-98d8-4e26-a335-1bfb373f4f07): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 16:32:19 crc kubenswrapper[4909]: E1128 16:32:19.026591 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-cj8sl" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07"
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.242141 4909 scope.go:117] "RemoveContainer" containerID="6c260636beb5317942af42fdece31e900a2dac87694bf2faba3da9a8668eafcc"
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.402079 4909 scope.go:117] "RemoveContainer" containerID="e310d7c9491842ef2798420d19a65596295716b1a00ce15380982f754fc33ec4"
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.430618 4909 scope.go:117] "RemoveContainer" containerID="f28a310428db4dd736408e290f4e6851feb79f62dc3e638ff1963b2276e26408"
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.463819 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6w86s"]
Nov 28 16:32:19 crc kubenswrapper[4909]: W1128 16:32:19.474774 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda09f19f7_b777_4314_9b4e_acdac8cf783a.slice/crio-e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2 WatchSource:0}: Error finding container e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2: Status 404 returned error can't find the container with id e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.491938 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:32:19 crc kubenswrapper[4909]: I1128 16:32:19.569404 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:32:19 crc kubenswrapper[4909]: W1128 16:32:19.582645 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ed2a549_d75f_4b1e_9d00_6eca4f86957f.slice/crio-f33fbb62f1f2b29260f0cba66028250bb06937ba450ea1cda7eb169b6dea3062 WatchSource:0}: Error finding container f33fbb62f1f2b29260f0cba66028250bb06937ba450ea1cda7eb169b6dea3062: Status 404 returned error can't find the container with id f33fbb62f1f2b29260f0cba66028250bb06937ba450ea1cda7eb169b6dea3062
Nov 28 16:32:20 crc kubenswrapper[4909]: I1128 16:32:20.466090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerStarted","Data":"f650d12360f184251edc81adb3789ea75b73835fbcff32683fdc367eeb276ef9"}
Nov 28 16:32:20 crc kubenswrapper[4909]: I1128 16:32:20.468341 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerStarted","Data":"f33fbb62f1f2b29260f0cba66028250bb06937ba450ea1cda7eb169b6dea3062"}
Nov 28 16:32:20 crc kubenswrapper[4909]: I1128 16:32:20.475078 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6w86s" event={"ID":"a09f19f7-b777-4314-9b4e-acdac8cf783a","Type":"ContainerStarted","Data":"e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2"}
Nov 28 16:32:20 crc kubenswrapper[4909]: E1128 16:32:20.478558 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-cj8sl" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07"
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.490777 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerStarted","Data":"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.494288 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ngll9" event={"ID":"2d62f293-d022-4d92-915b-f83c3fa157a7","Type":"ContainerStarted","Data":"c3b9dc337a3ea52218200ac2c915acc7e5c688e21e83792557a9a417ce242639"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.499155 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerStarted","Data":"e36ac30c46e63b922b526420bbd327b8c4c53bf4ecea562152d6d344150751dd"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.502890 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerStarted","Data":"d40a4b6cfcdb1ace4ed94b638cf6d1b874b6ed7fea6b7915423222d760158ab8"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.502959 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerStarted","Data":"ddaf1e784f7ded7f9c54a05ec17a7bae8623c22eea8fc597422047afc91451a0"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.506573 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6w86s" event={"ID":"a09f19f7-b777-4314-9b4e-acdac8cf783a","Type":"ContainerStarted","Data":"baaeeafb5f0ff35dc0eed801db85347a6019df3603c8d5fda752ecad50cc5dee"}
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.515617 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ngll9" podStartSLOduration=3.833599444 podStartE2EDuration="32.515596615s" podCreationTimestamp="2025-11-28 16:31:49 +0000 UTC" firstStartedPulling="2025-11-28 16:31:50.563923498 +0000 UTC m=+1292.960608022" lastFinishedPulling="2025-11-28 16:32:19.245920669 +0000 UTC m=+1321.642605193" observedRunningTime="2025-11-28 16:32:21.512862572 +0000 UTC m=+1323.909547086" watchObservedRunningTime="2025-11-28 16:32:21.515596615 +0000 UTC m=+1323.912281139"
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.552817 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6w86s" podStartSLOduration=4.5527990339999995 podStartE2EDuration="4.552799034s" podCreationTimestamp="2025-11-28 16:32:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:21.53921896 +0000 UTC m=+1323.935903534" watchObservedRunningTime="2025-11-28 16:32:21.552799034 +0000 UTC m=+1323.949483558"
Nov 28 16:32:21 crc kubenswrapper[4909]: I1128 16:32:21.568132 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=18.568115265 podStartE2EDuration="18.568115265s" podCreationTimestamp="2025-11-28 16:32:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:21.562883445 +0000 UTC m=+1323.959567959" watchObservedRunningTime="2025-11-28 16:32:21.568115265 +0000 UTC m=+1323.964799789"
Nov 28 16:32:22 crc kubenswrapper[4909]: I1128 16:32:22.521490 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerStarted","Data":"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf"}
Nov 28 16:32:22 crc kubenswrapper[4909]: I1128 16:32:22.551147 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.551123716 podStartE2EDuration="17.551123716s" podCreationTimestamp="2025-11-28 16:32:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:22.540708256 +0000 UTC m=+1324.937392800" watchObservedRunningTime="2025-11-28 16:32:22.551123716 +0000 UTC m=+1324.947808240"
Nov 28 16:32:23 crc kubenswrapper[4909]: I1128 16:32:23.528275 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerStarted","Data":"cfe793ee5a40fe2697a86b51f9d61105e2f440e85819826987d20a8cc47e8088"}
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.083021 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.083470 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.120433 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.125186 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.536404 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:24 crc kubenswrapper[4909]: I1128 16:32:24.536475 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.144754 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.145057 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.172147 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.184137 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.554244 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:26 crc kubenswrapper[4909]: I1128 16:32:26.554317 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:27 crc kubenswrapper[4909]: I1128 16:32:27.748737 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:27 crc kubenswrapper[4909]: I1128 16:32:27.757218 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 16:32:28 crc kubenswrapper[4909]: I1128 16:32:28.488349 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:28 crc kubenswrapper[4909]: I1128 16:32:28.492649 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 16:32:31 crc kubenswrapper[4909]: I1128 16:32:31.598937 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6wh46" event={"ID":"fbc4977f-f846-4428-9fb6-558811c3e65b","Type":"ContainerStarted","Data":"c45af0789eb64b0332ed9444cb13a77731843941ac7d5caa108d91d85a7f2bfa"}
Nov 28 16:32:31 crc kubenswrapper[4909]: I1128 16:32:31.602375 4909 generic.go:334] "Generic (PLEG): container finished" podID="a09f19f7-b777-4314-9b4e-acdac8cf783a" containerID="baaeeafb5f0ff35dc0eed801db85347a6019df3603c8d5fda752ecad50cc5dee" exitCode=0
Nov 28 16:32:31 crc kubenswrapper[4909]: I1128 16:32:31.602453 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6w86s" event={"ID":"a09f19f7-b777-4314-9b4e-acdac8cf783a","Type":"ContainerDied","Data":"baaeeafb5f0ff35dc0eed801db85347a6019df3603c8d5fda752ecad50cc5dee"}
Nov 28 16:32:31 crc kubenswrapper[4909]: I1128 16:32:31.617124 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-6wh46" podStartSLOduration=2.362244041 podStartE2EDuration="42.617107226s" podCreationTimestamp="2025-11-28 16:31:49 +0000 UTC" firstStartedPulling="2025-11-28 16:31:50.56214552 +0000 UTC m=+1292.958830044" lastFinishedPulling="2025-11-28 16:32:30.817008705 +0000 UTC m=+1333.213693229" observedRunningTime="2025-11-28 16:32:31.611027272 +0000 UTC m=+1334.007711796" watchObservedRunningTime="2025-11-28 16:32:31.617107226 +0000 UTC m=+1334.013791750"
Nov 28 16:32:32 crc kubenswrapper[4909]: I1128 16:32:32.618439 4909 generic.go:334] "Generic (PLEG): container finished" podID="2d62f293-d022-4d92-915b-f83c3fa157a7" containerID="c3b9dc337a3ea52218200ac2c915acc7e5c688e21e83792557a9a417ce242639" exitCode=0
Nov 28 16:32:32 crc kubenswrapper[4909]: I1128 16:32:32.618508 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ngll9" event={"ID":"2d62f293-d022-4d92-915b-f83c3fa157a7","Type":"ContainerDied","Data":"c3b9dc337a3ea52218200ac2c915acc7e5c688e21e83792557a9a417ce242639"}
Nov 28 16:32:32 crc kubenswrapper[4909]: I1128 16:32:32.945867 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.008172 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.008455 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.008666 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.008829 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.008909 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qm5n8\" (UniqueName: \"kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.009010 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys\") pod \"a09f19f7-b777-4314-9b4e-acdac8cf783a\" (UID: \"a09f19f7-b777-4314-9b4e-acdac8cf783a\") "
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.014267 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.014614 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts" (OuterVolumeSpecName: "scripts") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.015119 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.025815 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8" (OuterVolumeSpecName: "kube-api-access-qm5n8") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "kube-api-access-qm5n8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.039577 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data" (OuterVolumeSpecName: "config-data") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.049435 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a09f19f7-b777-4314-9b4e-acdac8cf783a" (UID: "a09f19f7-b777-4314-9b4e-acdac8cf783a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111648 4909 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111741 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111758 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111775 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111791 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a09f19f7-b777-4314-9b4e-acdac8cf783a-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.111805 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qm5n8\" (UniqueName: \"kubernetes.io/projected/a09f19f7-b777-4314-9b4e-acdac8cf783a-kube-api-access-qm5n8\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.632788 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6w86s"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.632782 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6w86s" event={"ID":"a09f19f7-b777-4314-9b4e-acdac8cf783a","Type":"ContainerDied","Data":"e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2"}
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.632863 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e83214303e3c3e4f2f0d204076711189c1b9ff760ff40d1dd0f261658ac021b2"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.847389 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8676ff5994-wjk95"]
Nov 28 16:32:33 crc kubenswrapper[4909]: E1128 16:32:33.848079 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a09f19f7-b777-4314-9b4e-acdac8cf783a" containerName="keystone-bootstrap"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.848097 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a09f19f7-b777-4314-9b4e-acdac8cf783a" containerName="keystone-bootstrap"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.848266 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a09f19f7-b777-4314-9b4e-acdac8cf783a" containerName="keystone-bootstrap"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.848840 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8676ff5994-wjk95"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.851766 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.851965 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.852105 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.852172 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.852335 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.852571 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fmn76"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.856791 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8676ff5994-wjk95"]
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.938960 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.938999 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95"
Nov 28 16:32:33 crc kubenswrapper[4909]: I1128
16:32:33.939038 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.939057 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42k4s\" (UniqueName: \"kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.939116 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.939140 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.939159 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:33 crc kubenswrapper[4909]: I1128 16:32:33.939220 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.040581 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.040851 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.040965 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 
16:32:34.041046 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42k4s\" (UniqueName: \"kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.041139 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.041243 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.041326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.041513 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.046507 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.046851 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.047137 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.047184 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.048783 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.049831 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.050601 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.060898 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42k4s\" (UniqueName: \"kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s\") pod \"keystone-8676ff5994-wjk95\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:34 crc kubenswrapper[4909]: I1128 16:32:34.168436 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.227819 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ngll9" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.313231 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data\") pod \"2d62f293-d022-4d92-915b-f83c3fa157a7\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.313318 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts\") pod \"2d62f293-d022-4d92-915b-f83c3fa157a7\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.313388 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle\") pod \"2d62f293-d022-4d92-915b-f83c3fa157a7\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.313489 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs\") pod \"2d62f293-d022-4d92-915b-f83c3fa157a7\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.313519 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6477\" (UniqueName: \"kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477\") pod \"2d62f293-d022-4d92-915b-f83c3fa157a7\" (UID: \"2d62f293-d022-4d92-915b-f83c3fa157a7\") " Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.314038 4909 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs" (OuterVolumeSpecName: "logs") pod "2d62f293-d022-4d92-915b-f83c3fa157a7" (UID: "2d62f293-d022-4d92-915b-f83c3fa157a7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.320909 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477" (OuterVolumeSpecName: "kube-api-access-w6477") pod "2d62f293-d022-4d92-915b-f83c3fa157a7" (UID: "2d62f293-d022-4d92-915b-f83c3fa157a7"). InnerVolumeSpecName "kube-api-access-w6477". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.321876 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts" (OuterVolumeSpecName: "scripts") pod "2d62f293-d022-4d92-915b-f83c3fa157a7" (UID: "2d62f293-d022-4d92-915b-f83c3fa157a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.336547 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d62f293-d022-4d92-915b-f83c3fa157a7" (UID: "2d62f293-d022-4d92-915b-f83c3fa157a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.338755 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data" (OuterVolumeSpecName: "config-data") pod "2d62f293-d022-4d92-915b-f83c3fa157a7" (UID: "2d62f293-d022-4d92-915b-f83c3fa157a7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.416217 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d62f293-d022-4d92-915b-f83c3fa157a7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.416252 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6477\" (UniqueName: \"kubernetes.io/projected/2d62f293-d022-4d92-915b-f83c3fa157a7-kube-api-access-w6477\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.416266 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.416281 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.416291 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d62f293-d022-4d92-915b-f83c3fa157a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.676410 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ngll9" event={"ID":"2d62f293-d022-4d92-915b-f83c3fa157a7","Type":"ContainerDied","Data":"8a522f616b775e066da064a1560049a1a668c2c6c27976870cce096384d03627"} Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.676455 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a522f616b775e066da064a1560049a1a668c2c6c27976870cce096384d03627" Nov 28 16:32:38 crc kubenswrapper[4909]: I1128 16:32:38.676470 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ngll9" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.426943 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 16:32:39 crc kubenswrapper[4909]: E1128 16:32:39.427619 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" containerName="placement-db-sync" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.427632 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" containerName="placement-db-sync" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.427828 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" containerName="placement-db-sync" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.428826 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.442769 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.443219 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.443395 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.443609 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.443780 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.443897 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-fft54" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531706 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531781 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531821 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531846 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbwjk\" (UniqueName: \"kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531907 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.531974 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.532051 
4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.634048 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.634158 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.634251 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.634287 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.637585 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbwjk\" (UniqueName: \"kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.637616 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.637648 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.640617 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.641079 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.641076 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.641169 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.979971 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.981026 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:39 crc kubenswrapper[4909]: I1128 16:32:39.984737 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbwjk\" (UniqueName: \"kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk\") pod \"placement-7bcd585886-f6h7k\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:40 crc kubenswrapper[4909]: I1128 16:32:40.109091 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:45 crc kubenswrapper[4909]: E1128 16:32:45.342469 4909 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/sg-core:latest" Nov 28 16:32:45 crc kubenswrapper[4909]: E1128 16:32:45.343096 4909 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:sg-core,Image:quay.io/openstack-k8s-operators/sg-core:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:sg-core-conf-yaml,ReadOnly:false,MountPath:/etc/sg-core.conf.yaml,SubPath:sg-core.conf.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kt7dc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(403021b8-fd7a-4823-9f99-622829f4d935): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:32:45 crc kubenswrapper[4909]: I1128 16:32:45.782697 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 16:32:45 crc kubenswrapper[4909]: W1128 16:32:45.786069 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7651107_0120_4611_87d0_be009f3749d7.slice/crio-4ad31d32da06cf49ec254691efea35419db7b0910b39f95e73b313cb0412df86 WatchSource:0}: Error finding container 4ad31d32da06cf49ec254691efea35419db7b0910b39f95e73b313cb0412df86: Status 404 returned error can't find the container with id 4ad31d32da06cf49ec254691efea35419db7b0910b39f95e73b313cb0412df86 Nov 28 16:32:45 crc kubenswrapper[4909]: I1128 16:32:45.844031 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8676ff5994-wjk95"] Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.748146 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerStarted","Data":"f5457f347b25c89d470eee3116b8c1baa0d18385fa471e46a66b59d37d629001"} Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.748977 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerStarted","Data":"4ad31d32da06cf49ec254691efea35419db7b0910b39f95e73b313cb0412df86"} Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.750584 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8676ff5994-wjk95" 
event={"ID":"19094b17-f379-494e-b377-8191ddab4924","Type":"ContainerStarted","Data":"c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca"} Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.750639 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8676ff5994-wjk95" event={"ID":"19094b17-f379-494e-b377-8191ddab4924","Type":"ContainerStarted","Data":"c3b929b88bd53587b3ecd169d8d2c3d40d9b7098845c0ae7b4378ffb2f091f9d"} Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.751860 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.753759 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cj8sl" event={"ID":"148d191b-98d8-4e26-a335-1bfb373f4f07","Type":"ContainerStarted","Data":"f12da04a1ce03bbd5f18972fc45e9a4b43b48df4a153d6335fb8de7e7922ae5b"} Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.788339 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8676ff5994-wjk95" podStartSLOduration=13.788312118 podStartE2EDuration="13.788312118s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:46.781322401 +0000 UTC m=+1349.178006935" watchObservedRunningTime="2025-11-28 16:32:46.788312118 +0000 UTC m=+1349.184996642" Nov 28 16:32:46 crc kubenswrapper[4909]: I1128 16:32:46.805868 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-cj8sl" podStartSLOduration=3.88998916 podStartE2EDuration="58.805848849s" podCreationTimestamp="2025-11-28 16:31:48 +0000 UTC" firstStartedPulling="2025-11-28 16:31:50.463145892 +0000 UTC m=+1292.859830416" lastFinishedPulling="2025-11-28 16:32:45.379005581 +0000 UTC m=+1347.775690105" observedRunningTime="2025-11-28 16:32:46.803699031 +0000 UTC m=+1349.200383555" watchObservedRunningTime="2025-11-28 16:32:46.805848849 +0000 UTC m=+1349.202533393" Nov 28 16:32:47 crc kubenswrapper[4909]: I1128 16:32:47.763377 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerStarted","Data":"b1b648d707bec46e03074f6ddbe73bc4787a1ff840f797f11edd6e2f52984f64"} Nov 28 16:32:47 crc kubenswrapper[4909]: I1128 16:32:47.789411 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7bcd585886-f6h7k" podStartSLOduration=8.789377004 podStartE2EDuration="8.789377004s" podCreationTimestamp="2025-11-28 16:32:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:47.784229535 +0000 UTC m=+1350.180914079" watchObservedRunningTime="2025-11-28 16:32:47.789377004 +0000 UTC m=+1350.186061528" Nov 28 16:32:48 crc kubenswrapper[4909]: I1128 16:32:48.773028 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbc4977f-f846-4428-9fb6-558811c3e65b" containerID="c45af0789eb64b0332ed9444cb13a77731843941ac7d5caa108d91d85a7f2bfa" exitCode=0 Nov 28 16:32:48 crc kubenswrapper[4909]: I1128 16:32:48.773108 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6wh46" 
event={"ID":"fbc4977f-f846-4428-9fb6-558811c3e65b","Type":"ContainerDied","Data":"c45af0789eb64b0332ed9444cb13a77731843941ac7d5caa108d91d85a7f2bfa"} Nov 28 16:32:48 crc kubenswrapper[4909]: I1128 16:32:48.773884 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:48 crc kubenswrapper[4909]: I1128 16:32:48.773906 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.140035 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6wh46" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.242787 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data\") pod \"fbc4977f-f846-4428-9fb6-558811c3e65b\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.242850 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle\") pod \"fbc4977f-f846-4428-9fb6-558811c3e65b\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.242950 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjpgz\" (UniqueName: \"kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz\") pod \"fbc4977f-f846-4428-9fb6-558811c3e65b\" (UID: \"fbc4977f-f846-4428-9fb6-558811c3e65b\") " Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.248592 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz" (OuterVolumeSpecName: "kube-api-access-xjpgz") pod "fbc4977f-f846-4428-9fb6-558811c3e65b" (UID: "fbc4977f-f846-4428-9fb6-558811c3e65b"). InnerVolumeSpecName "kube-api-access-xjpgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.249151 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fbc4977f-f846-4428-9fb6-558811c3e65b" (UID: "fbc4977f-f846-4428-9fb6-558811c3e65b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.285809 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fbc4977f-f846-4428-9fb6-558811c3e65b" (UID: "fbc4977f-f846-4428-9fb6-558811c3e65b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.344997 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.345030 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc4977f-f846-4428-9fb6-558811c3e65b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.345043 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjpgz\" (UniqueName: \"kubernetes.io/projected/fbc4977f-f846-4428-9fb6-558811c3e65b-kube-api-access-xjpgz\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.807047 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6wh46" event={"ID":"fbc4977f-f846-4428-9fb6-558811c3e65b","Type":"ContainerDied","Data":"f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324"} Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.807088 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6faa27677c7df0fe9d6331b2c14b09f4b2561d21374b4c29439104e038b2324" Nov 28 16:32:51 crc kubenswrapper[4909]: I1128 16:32:51.807118 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6wh46" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.438390 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"] Nov 28 16:32:52 crc kubenswrapper[4909]: E1128 16:32:52.439679 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b" containerName="barbican-db-sync" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.439697 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b" containerName="barbican-db-sync" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.439934 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b" containerName="barbican-db-sync" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.441222 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.449228 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.449565 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cjq6w" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.449713 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.466636 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"] Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.473473 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"] Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.474895 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.481166 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.540667 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"] Nov 28 16:32:52 crc kubenswrapper[4909]: E1128 16:32:52.552151 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"sg-core\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="403021b8-fd7a-4823-9f99-622829f4d935" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.562034 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"] Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.565588 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.567917 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.567964 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.568032 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4qnd\" (UniqueName: \"kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.568123 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.568209 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.573873 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"] Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678719 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678790 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678834 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678863 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678890 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678953 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.678977 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679000 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfvtg\" (UniqueName: \"kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" Nov 28 16:32:52 crc 
kubenswrapper[4909]: I1128 16:32:52.679031 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679110 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4qnd\" (UniqueName: \"kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679156 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679197 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-424v7\" (UniqueName: \"kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679227 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.679262 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.681316 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.690215 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.691409 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.707236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4qnd\" (UniqueName: \"kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.716224 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data\") pod \"barbican-worker-6c95ffb47-q5ls2\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.741771 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"]
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.743470 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.751469 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.777026 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6c95ffb47-q5ls2"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780424 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780507 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-424v7\" (UniqueName: \"kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780578 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780640 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780686 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780711 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780757 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780786 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfvtg\" (UniqueName: \"kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780817 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.780849 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.781788 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.784371 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.785486 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.785757 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.786248 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.787290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.790205 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.795687 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.796182 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.806773 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"]
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.807725 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfvtg\" (UniqueName: \"kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg\") pod \"dnsmasq-dns-6d66f584d7-k7t2k\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") " pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.832830 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-424v7\" (UniqueName: \"kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7\") pod \"barbican-keystone-listener-585576c97d-fvkcs\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.897401 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.897561 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.897644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.897814 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d5hb\" (UniqueName: \"kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.897866 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.916539 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.924262 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerStarted","Data":"cdf71b20d3f9d2593d7f4c8cb7dd45494865ea0ae01c47186ea6e9b07989d017"}
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.925017 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-central-agent" containerID="cri-o://e36ac30c46e63b922b526420bbd327b8c4c53bf4ecea562152d6d344150751dd" gracePeriod=30
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.925433 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.925611 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-notification-agent" containerID="cri-o://cfe793ee5a40fe2697a86b51f9d61105e2f440e85819826987d20a8cc47e8088" gracePeriod=30
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.925738 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="proxy-httpd" containerID="cri-o://cdf71b20d3f9d2593d7f4c8cb7dd45494865ea0ae01c47186ea6e9b07989d017" gracePeriod=30
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.943523 4909 generic.go:334] "Generic (PLEG): container finished" podID="148d191b-98d8-4e26-a335-1bfb373f4f07" containerID="f12da04a1ce03bbd5f18972fc45e9a4b43b48df4a153d6335fb8de7e7922ae5b" exitCode=0
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.943565 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cj8sl" event={"ID":"148d191b-98d8-4e26-a335-1bfb373f4f07","Type":"ContainerDied","Data":"f12da04a1ce03bbd5f18972fc45e9a4b43b48df4a153d6335fb8de7e7922ae5b"}
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.999538 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.999619 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.999647 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.999710 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d5hb\" (UniqueName: \"kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:52 crc kubenswrapper[4909]: I1128 16:32:52.999826 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.001445 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.006013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.007891 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.008537 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.020472 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d5hb\" (UniqueName: \"kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb\") pod \"barbican-api-c6688d798-rv8rb\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.101372 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.103960 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.381742 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"]
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.454198 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"]
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.651058 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"]
Nov 28 16:32:53 crc kubenswrapper[4909]: W1128 16:32:53.653560 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode086b29e_c7fb_45a4_a6f2_c30508f1b25a.slice/crio-d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074 WatchSource:0}: Error finding container d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074: Status 404 returned error can't find the container with id d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074
Nov 28 16:32:53 crc kubenswrapper[4909]: W1128 16:32:53.655439 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod017980d9_7e8a_48d2_af4c_9251526eeb2b.slice/crio-5648aae709d89fd73a2b7f886742c695b3472bdd16413a0fd9ece2ecb0ac06ef WatchSource:0}: Error finding container 5648aae709d89fd73a2b7f886742c695b3472bdd16413a0fd9ece2ecb0ac06ef: Status 404 returned error can't find the container with id 5648aae709d89fd73a2b7f886742c695b3472bdd16413a0fd9ece2ecb0ac06ef
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.657402 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"]
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.972708 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerStarted","Data":"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.973106 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerStarted","Data":"5648aae709d89fd73a2b7f886742c695b3472bdd16413a0fd9ece2ecb0ac06ef"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.975979 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e366404-716f-40c0-8481-f146be059d52" containerID="f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd" exitCode=0
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.976045 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" event={"ID":"2e366404-716f-40c0-8481-f146be059d52","Type":"ContainerDied","Data":"f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.976066 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" event={"ID":"2e366404-716f-40c0-8481-f146be059d52","Type":"ContainerStarted","Data":"13ee9706a11cfd6149a0cd3806249883c8c2624536b4da91e87b6cff90ea4709"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.979931 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerStarted","Data":"7e5e798026bdcb44572f2fef1737c88674e5cd832b7e7ac9747d50d221925db0"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.984117 4909 generic.go:334] "Generic (PLEG): container finished" podID="403021b8-fd7a-4823-9f99-622829f4d935" containerID="e36ac30c46e63b922b526420bbd327b8c4c53bf4ecea562152d6d344150751dd" exitCode=0
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.984773 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerDied","Data":"e36ac30c46e63b922b526420bbd327b8c4c53bf4ecea562152d6d344150751dd"}
Nov 28 16:32:53 crc kubenswrapper[4909]: I1128 16:32:53.986939 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerStarted","Data":"d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074"}
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.352979 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cj8sl"
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.429740 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.429817 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.429870 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.429883 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.429946 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.430033 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.430152 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrxm7\" (UniqueName: \"kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7\") pod \"148d191b-98d8-4e26-a335-1bfb373f4f07\" (UID: \"148d191b-98d8-4e26-a335-1bfb373f4f07\") "
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.430689 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/148d191b-98d8-4e26-a335-1bfb373f4f07-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.436364 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts" (OuterVolumeSpecName: "scripts") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.444908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7" (OuterVolumeSpecName: "kube-api-access-jrxm7") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "kube-api-access-jrxm7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.447894 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.466306 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.496903 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data" (OuterVolumeSpecName: "config-data") pod "148d191b-98d8-4e26-a335-1bfb373f4f07" (UID: "148d191b-98d8-4e26-a335-1bfb373f4f07"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.532182 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.532234 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrxm7\" (UniqueName: \"kubernetes.io/projected/148d191b-98d8-4e26-a335-1bfb373f4f07-kube-api-access-jrxm7\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.532247 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.532258 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:54 crc kubenswrapper[4909]: I1128 16:32:54.532266 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d191b-98d8-4e26-a335-1bfb373f4f07-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.001509 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" event={"ID":"2e366404-716f-40c0-8481-f146be059d52","Type":"ContainerStarted","Data":"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2"}
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.002228 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.005928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cj8sl" event={"ID":"148d191b-98d8-4e26-a335-1bfb373f4f07","Type":"ContainerDied","Data":"8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b"}
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.005974 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8609b4d1fe7a13d68a36a7ec73e48bd3d144219ba0375c85d955bd0e75693d1b"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.006047 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cj8sl"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.013450 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerStarted","Data":"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022"}
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.014514 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.014560 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c6688d798-rv8rb"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.032795 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" podStartSLOduration=3.032776971 podStartE2EDuration="3.032776971s" podCreationTimestamp="2025-11-28 16:32:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:55.026270256 +0000 UTC m=+1357.422954810" watchObservedRunningTime="2025-11-28 16:32:55.032776971 +0000 UTC m=+1357.429461495"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.054256 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-c6688d798-rv8rb" podStartSLOduration=3.054236427 podStartE2EDuration="3.054236427s" podCreationTimestamp="2025-11-28 16:32:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:55.050389604 +0000 UTC m=+1357.447074138" watchObservedRunningTime="2025-11-28 16:32:55.054236427 +0000 UTC m=+1357.450920971"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.223401 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.278861 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"]
Nov 28 16:32:55 crc kubenswrapper[4909]: E1128 16:32:55.279310 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07" containerName="cinder-db-sync"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.279333 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07" containerName="cinder-db-sync"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.279543 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07" containerName="cinder-db-sync"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.280709 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.298028 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.379209 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.380841 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.383820 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-59vjz"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.384029 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.386338 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.386899 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.400153 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.449946 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.450095 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.450196 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.450234 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.450315 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lvn8\" (UniqueName: \"kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.450517 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.489299 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.494960 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.498747 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.515014 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.552526 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.552619 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.552649 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.552939 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.553570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.553807 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554207 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554249 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554395 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554466 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554526 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554595 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554719 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfgpl\" (UniqueName: \"kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.554762 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lvn8\" (UniqueName: \"kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.555096 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.555370 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.555590 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.583500 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lvn8\" (UniqueName: \"kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8\") pod \"dnsmasq-dns-674b76c99f-6jwmr\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.599578 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657076 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657477 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657535 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657625 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657813 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657874 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657961 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.657966 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658095 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658124 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658176 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658224 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658277 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfgpl\" (UniqueName: \"kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.658300 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp6db\" (UniqueName: \"kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.662228 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.662930 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.666271 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.667178 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.676501 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfgpl\" (UniqueName: \"kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl\") pod \"cinder-scheduler-0\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.695927 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759575 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759624 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759677 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759705 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp6db\" (UniqueName: \"kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759709 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.759971 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.760025 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.760124 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.760481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.763863 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.764994 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.765588 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.768517 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.780172 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp6db\" (UniqueName: \"kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db\") pod \"cinder-api-0\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " pod="openstack/cinder-api-0"
Nov 28 16:32:55 crc kubenswrapper[4909]: I1128 16:32:55.818627 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:32:56 crc kubenswrapper[4909]: I1128 16:32:56.218866 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"]
Nov 28 16:32:56 crc kubenswrapper[4909]: W1128 16:32:56.219991 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5156fb2_a147_4c44_95eb_8d54163395bc.slice/crio-38313a32d4f20fe07d11ae4c2ed402a7ebf4a88b408352700a8dee6e876311d0 WatchSource:0}: Error finding container 38313a32d4f20fe07d11ae4c2ed402a7ebf4a88b408352700a8dee6e876311d0: Status 404 returned error can't find the container with id 38313a32d4f20fe07d11ae4c2ed402a7ebf4a88b408352700a8dee6e876311d0
Nov 28 16:32:56 crc kubenswrapper[4909]: I1128 16:32:56.304358 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:32:56 crc kubenswrapper[4909]: W1128 16:32:56.309993 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7b661e6_2c60_4a0c_a36b_87a3b664c9e3.slice/crio-2904ec4da16cff43ed601e190040b1e1c71ab971dbf2b91d01dd2c7791a3f902 WatchSource:0}: Error finding container 2904ec4da16cff43ed601e190040b1e1c71ab971dbf2b91d01dd2c7791a3f902: Status 404 returned error can't find the container with id 2904ec4da16cff43ed601e190040b1e1c71ab971dbf2b91d01dd2c7791a3f902
Nov 28 16:32:56 crc kubenswrapper[4909]: W1128 16:32:56.405109 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3006e8a_91fc_4f9f_8499_bc0e0169ef9b.slice/crio-8e09c50910568c6be4952cf853635245ed635b8fc3599050e1995060e33b6b96 WatchSource:0}: Error finding container 8e09c50910568c6be4952cf853635245ed635b8fc3599050e1995060e33b6b96: Status 404 returned error can't find the container with id 8e09c50910568c6be4952cf853635245ed635b8fc3599050e1995060e33b6b96
Nov 28 16:32:56 crc kubenswrapper[4909]: I1128 16:32:56.407602 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.029577 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerStarted","Data":"2904ec4da16cff43ed601e190040b1e1c71ab971dbf2b91d01dd2c7791a3f902"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.031894 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerStarted","Data":"2dedb170588deaa6f11bf8d0e9ccb4ac0fb1f6ba18fbbaac5554659c70446bce"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.031932 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerStarted","Data":"dbc15b80c1cd50c3f062d20e4bbfd0c4ab351bae72bf60617a069c1be00aaa4b"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.034411 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerStarted","Data":"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.034451 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerStarted","Data":"8e09c50910568c6be4952cf853635245ed635b8fc3599050e1995060e33b6b96"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.035726 4909 generic.go:334] "Generic (PLEG): container finished" podID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerID="b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7" exitCode=0
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.036423 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" event={"ID":"b5156fb2-a147-4c44-95eb-8d54163395bc","Type":"ContainerDied","Data":"b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.036445 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" event={"ID":"b5156fb2-a147-4c44-95eb-8d54163395bc","Type":"ContainerStarted","Data":"38313a32d4f20fe07d11ae4c2ed402a7ebf4a88b408352700a8dee6e876311d0"}
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.036551 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="dnsmasq-dns" containerID="cri-o://1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2" gracePeriod=10
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.056590 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6c95ffb47-q5ls2" podStartSLOduration=2.332174182 podStartE2EDuration="5.056569085s" podCreationTimestamp="2025-11-28 16:32:52 +0000 UTC" firstStartedPulling="2025-11-28 16:32:53.388279081 +0000 UTC m=+1355.784963615" lastFinishedPulling="2025-11-28 16:32:56.112674004 +0000 UTC m=+1358.509358518" observedRunningTime="2025-11-28 16:32:57.052420404 +0000 UTC m=+1359.449104928" watchObservedRunningTime="2025-11-28 16:32:57.056569085 +0000 UTC m=+1359.453253609"
Nov 28 16:32:57 crc kubenswrapper[4909]: I1128 16:32:57.848265 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k"
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.010086 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.010878 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.010950 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.011039 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.011091 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.011145 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfvtg\" (UniqueName: \"kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg\") pod \"2e366404-716f-40c0-8481-f146be059d52\" (UID: \"2e366404-716f-40c0-8481-f146be059d52\") "
Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.015130 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg" (OuterVolumeSpecName: "kube-api-access-xfvtg") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "kube-api-access-xfvtg".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.055874 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerStarted","Data":"61af0c4690ad10f578c11a1874dcaa8f66ff04b25238b214fda8321e55f07b14"} Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.056305 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerStarted","Data":"52e65448aef539353daa8f7db84d105cd71f70da4604565423fd5950afa26a6b"} Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.066552 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" event={"ID":"b5156fb2-a147-4c44-95eb-8d54163395bc","Type":"ContainerStarted","Data":"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03"} Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.066744 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.070279 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e366404-716f-40c0-8481-f146be059d52" containerID="1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2" exitCode=0 Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.070560 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" event={"ID":"2e366404-716f-40c0-8481-f146be059d52","Type":"ContainerDied","Data":"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2"} Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.070759 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" event={"ID":"2e366404-716f-40c0-8481-f146be059d52","Type":"ContainerDied","Data":"13ee9706a11cfd6149a0cd3806249883c8c2624536b4da91e87b6cff90ea4709"} Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.070611 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-k7t2k" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.070791 4909 scope.go:117] "RemoveContainer" containerID="1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.074442 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.086301 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.090866 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" podStartSLOduration=2.325193944 podStartE2EDuration="6.090848863s" podCreationTimestamp="2025-11-28 16:32:52 +0000 UTC" firstStartedPulling="2025-11-28 16:32:53.656288816 +0000 UTC m=+1356.052973340" lastFinishedPulling="2025-11-28 16:32:57.421943715 +0000 UTC m=+1359.818628259" observedRunningTime="2025-11-28 16:32:58.073795125 +0000 UTC m=+1360.470479659" watchObservedRunningTime="2025-11-28 16:32:58.090848863 +0000 UTC m=+1360.487533387" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.091987 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.095151 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config" (OuterVolumeSpecName: "config") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.105106 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2e366404-716f-40c0-8481-f146be059d52" (UID: "2e366404-716f-40c0-8481-f146be059d52"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.111222 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" podStartSLOduration=3.111202329 podStartE2EDuration="3.111202329s" podCreationTimestamp="2025-11-28 16:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:58.101394746 +0000 UTC m=+1360.498079290" watchObservedRunningTime="2025-11-28 16:32:58.111202329 +0000 UTC m=+1360.507886843" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113828 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113855 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfvtg\" (UniqueName: \"kubernetes.io/projected/2e366404-716f-40c0-8481-f146be059d52-kube-api-access-xfvtg\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113875 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113885 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113893 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.113902 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2e366404-716f-40c0-8481-f146be059d52-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.124360 4909 scope.go:117] "RemoveContainer" containerID="f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.161813 4909 scope.go:117] "RemoveContainer" containerID="1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2" Nov 28 16:32:58 crc kubenswrapper[4909]: E1128 16:32:58.162344 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2\": container with ID starting with 1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2 not found: ID does not exist" containerID="1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.162375 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2"} err="failed to get container status \"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2\": rpc error: code = NotFound desc = could not find container \"1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2\": container with ID starting with 
1eb9fbd56644aa830ac5e7776d178d0b5b01e349c8b653b77a4512d8a96c93a2 not found: ID does not exist" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.162396 4909 scope.go:117] "RemoveContainer" containerID="f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd" Nov 28 16:32:58 crc kubenswrapper[4909]: E1128 16:32:58.164329 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd\": container with ID starting with f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd not found: ID does not exist" containerID="f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.164358 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd"} err="failed to get container status \"f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd\": rpc error: code = NotFound desc = could not find container \"f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd\": container with ID starting with f42e6a87ad1a6b88b43786e266bea67e3b457adb522d3a8c6e4d4e7f8d05a8bd not found: ID does not exist" Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.435695 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"] Nov 28 16:32:58 crc kubenswrapper[4909]: I1128 16:32:58.483718 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-k7t2k"] Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.108689 4909 generic.go:334] "Generic (PLEG): container finished" podID="403021b8-fd7a-4823-9f99-622829f4d935" containerID="cfe793ee5a40fe2697a86b51f9d61105e2f440e85819826987d20a8cc47e8088" exitCode=0 Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.108741 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerDied","Data":"cfe793ee5a40fe2697a86b51f9d61105e2f440e85819826987d20a8cc47e8088"} Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.113003 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerStarted","Data":"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7"} Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.113642 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.119623 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerStarted","Data":"726d1b915da1a9defb392ec7e21dada8455bcc44349949a64ffdf8368abaf563"} Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.144544 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.144521981 podStartE2EDuration="4.144521981s" podCreationTimestamp="2025-11-28 16:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:59.139300321 +0000 UTC m=+1361.535984865" watchObservedRunningTime="2025-11-28 16:32:59.144521981 +0000 UTC m=+1361.541206515" Nov 28 16:32:59 crc 
kubenswrapper[4909]: I1128 16:32:59.816624 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:32:59 crc kubenswrapper[4909]: I1128 16:32:59.914948 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e366404-716f-40c0-8481-f146be059d52" path="/var/lib/kubelet/pods/2e366404-716f-40c0-8481-f146be059d52/volumes" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.131258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerStarted","Data":"e3a22895d25b271db66c975ba2e0216b64c28fd1e04142dbb967340dd7b6256e"} Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.133013 4909 generic.go:334] "Generic (PLEG): container finished" podID="29c42f34-c4bc-433c-b0d7-a0a8acf595db" containerID="77eec265c191d92518e9615ece2181462c2321d96a38aae2b1a5320db748c0bb" exitCode=0 Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.133086 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fcstr" event={"ID":"29c42f34-c4bc-433c-b0d7-a0a8acf595db","Type":"ContainerDied","Data":"77eec265c191d92518e9615ece2181462c2321d96a38aae2b1a5320db748c0bb"} Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.136270 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-c6688d798-rv8rb" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.156914 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.344519645 podStartE2EDuration="5.156894442s" podCreationTimestamp="2025-11-28 16:32:55 +0000 UTC" firstStartedPulling="2025-11-28 16:32:56.313201848 +0000 UTC m=+1358.709886372" lastFinishedPulling="2025-11-28 16:32:58.125576645 +0000 UTC m=+1360.522261169" observedRunningTime="2025-11-28 16:33:00.151915768 +0000 UTC m=+1362.548600292" watchObservedRunningTime="2025-11-28 16:33:00.156894442 +0000 UTC m=+1362.553578966" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.318134 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"] Nov 28 16:33:00 crc kubenswrapper[4909]: E1128 16:33:00.319006 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="init" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.319039 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="init" Nov 28 16:33:00 crc kubenswrapper[4909]: E1128 16:33:00.319052 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="dnsmasq-dns" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.319060 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="dnsmasq-dns" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.319291 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e366404-716f-40c0-8481-f146be059d52" containerName="dnsmasq-dns" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.320768 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.327133 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.327458 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.373828 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"] Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.463647 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.463724 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.463995 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbm7r\" (UniqueName: \"kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.464093 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.464203 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.464256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.464408 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.469523 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/barbican-api-c6688d798-rv8rb" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.566788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.566868 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.566949 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbm7r\" (UniqueName: \"kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.566980 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.567027 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.567055 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.567105 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.568411 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.573542 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc 
kubenswrapper[4909]: I1128 16:33:00.574261 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.576373 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.593268 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.593616 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.595815 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbm7r\" (UniqueName: \"kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r\") pod \"barbican-api-f6ddfdd4b-szlst\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.692861 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:00 crc kubenswrapper[4909]: I1128 16:33:00.697864 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.151718 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api-log" containerID="cri-o://d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" gracePeriod=30 Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.152210 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api" containerID="cri-o://865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" gracePeriod=30 Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.495979 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"] Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.498935 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fcstr" Nov 28 16:33:01 crc kubenswrapper[4909]: W1128 16:33:01.502345 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffd60458_19af_464b_9649_57d25893f22a.slice/crio-a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937 WatchSource:0}: Error finding container a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937: Status 404 returned error can't find the container with id a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937 Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.584853 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config\") pod \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.584954 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27sb9\" (UniqueName: \"kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9\") pod \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.585066 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle\") pod \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\" (UID: \"29c42f34-c4bc-433c-b0d7-a0a8acf595db\") " Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.593117 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9" (OuterVolumeSpecName: "kube-api-access-27sb9") pod "29c42f34-c4bc-433c-b0d7-a0a8acf595db" (UID: "29c42f34-c4bc-433c-b0d7-a0a8acf595db"). InnerVolumeSpecName "kube-api-access-27sb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.617546 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29c42f34-c4bc-433c-b0d7-a0a8acf595db" (UID: "29c42f34-c4bc-433c-b0d7-a0a8acf595db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.619449 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config" (OuterVolumeSpecName: "config") pod "29c42f34-c4bc-433c-b0d7-a0a8acf595db" (UID: "29c42f34-c4bc-433c-b0d7-a0a8acf595db"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.687333 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.688131 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27sb9\" (UniqueName: \"kubernetes.io/projected/29c42f34-c4bc-433c-b0d7-a0a8acf595db-kube-api-access-27sb9\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:01 crc kubenswrapper[4909]: I1128 16:33:01.688192 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c42f34-c4bc-433c-b0d7-a0a8acf595db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.160324 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.162859 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerStarted","Data":"3cd8ac1736c6fbc1977f593e8c58c7c95ab9e0dac8a3505b8acefff70b5cfba5"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.162920 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerStarted","Data":"29d0bc179bbb27c3f9f6023ab4558b76e568b29b98ddf992b4f8391b462dd92d"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.162933 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerStarted","Data":"a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.162977 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.163087 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.167554 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fcstr" event={"ID":"29c42f34-c4bc-433c-b0d7-a0a8acf595db","Type":"ContainerDied","Data":"e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.167589 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8e9e26e7d69fc7aeb23b667ccb57f7d97deea6b8c208c6efa28ce163e7028b8" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.167634 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fcstr" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.172928 4909 generic.go:334] "Generic (PLEG): container finished" podID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerID="865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" exitCode=0 Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.172952 4909 generic.go:334] "Generic (PLEG): container finished" podID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerID="d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" exitCode=143 Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.172962 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.172998 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerDied","Data":"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.173026 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerDied","Data":"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.173036 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b","Type":"ContainerDied","Data":"8e09c50910568c6be4952cf853635245ed635b8fc3599050e1995060e33b6b96"} Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.173066 4909 scope.go:117] "RemoveContainer" containerID="865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.225357 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-f6ddfdd4b-szlst" podStartSLOduration=2.225335984 podStartE2EDuration="2.225335984s" podCreationTimestamp="2025-11-28 16:33:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:02.207268129 +0000 UTC m=+1364.603952653" watchObservedRunningTime="2025-11-28 16:33:02.225335984 +0000 UTC m=+1364.622020528" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.232091 4909 scope.go:117] "RemoveContainer" containerID="d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.263214 4909 scope.go:117] "RemoveContainer" containerID="865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" Nov 28 16:33:02 crc kubenswrapper[4909]: E1128 16:33:02.263636 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7\": container with ID starting with 865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7 not found: ID does not exist" containerID="865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.263688 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7"} err="failed to get container status \"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7\": rpc 
error: code = NotFound desc = could not find container \"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7\": container with ID starting with 865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7 not found: ID does not exist" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.263712 4909 scope.go:117] "RemoveContainer" containerID="d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" Nov 28 16:33:02 crc kubenswrapper[4909]: E1128 16:33:02.264070 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c\": container with ID starting with d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c not found: ID does not exist" containerID="d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.264103 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c"} err="failed to get container status \"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c\": rpc error: code = NotFound desc = could not find container \"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c\": container with ID starting with d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c not found: ID does not exist" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.264122 4909 scope.go:117] "RemoveContainer" containerID="865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.264413 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7"} err="failed to get container status \"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7\": rpc error: code = NotFound desc = could not find container \"865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7\": container with ID starting with 865200f3aa817624b4422991e9dd3465edc4dee2880a26165ef081b1898559a7 not found: ID does not exist" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.264456 4909 scope.go:117] "RemoveContainer" containerID="d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.264812 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c"} err="failed to get container status \"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c\": rpc error: code = NotFound desc = could not find container \"d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c\": container with ID starting with d3dca17f1af9c2a07b654276d016c68d0ec003cca190751daf6731ed8e0bd08c not found: ID does not exist" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301727 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301781 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301823 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301867 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hp6db\" (UniqueName: \"kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301954 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.301972 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.302690 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs" (OuterVolumeSpecName: "logs") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.302846 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts\") pod \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\" (UID: \"d3006e8a-91fc-4f9f-8499-bc0e0169ef9b\") " Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.302873 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.303848 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.303873 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.306197 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db" (OuterVolumeSpecName: "kube-api-access-hp6db") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "kube-api-access-hp6db". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.306580 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.308581 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts" (OuterVolumeSpecName: "scripts") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.354840 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.371491 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data" (OuterVolumeSpecName: "config-data") pod "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" (UID: "d3006e8a-91fc-4f9f-8499-bc0e0169ef9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.411834 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.411876 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hp6db\" (UniqueName: \"kubernetes.io/projected/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-kube-api-access-hp6db\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.411886 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.411895 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.411903 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.478607 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.478917 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="dnsmasq-dns" containerID="cri-o://ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03" gracePeriod=10 Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.483935 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.522706 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:02 crc kubenswrapper[4909]: E1128 16:33:02.523076 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api-log" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523088 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api-log" Nov 28 16:33:02 crc kubenswrapper[4909]: E1128 16:33:02.523126 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523132 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api" Nov 28 16:33:02 crc kubenswrapper[4909]: E1128 16:33:02.523146 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c42f34-c4bc-433c-b0d7-a0a8acf595db" containerName="neutron-db-sync" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523152 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c42f34-c4bc-433c-b0d7-a0a8acf595db" containerName="neutron-db-sync" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523339 4909 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523361 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c42f34-c4bc-433c-b0d7-a0a8acf595db" containerName="neutron-db-sync" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.523375 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" containerName="cinder-api-log" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.524275 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.530533 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.536513 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.536821 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.536959 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.537133 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vwqt8" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.539259 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.554731 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.593861 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.632060 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.632260 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.632351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlgsf\" (UniqueName: \"kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.632411 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " 
pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.632539 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.707451 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.729490 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.734607 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.734675 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.734808 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.734885 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlgsf\" (UniqueName: \"kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.734940 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zl9h\" (UniqueName: \"kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.735002 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.735106 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc 
kubenswrapper[4909]: I1128 16:33:02.735186 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.735298 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.735392 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.735438 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.742670 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.743767 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.744008 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.762997 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.768410 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlgsf\" (UniqueName: \"kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf\") pod \"neutron-674458cfcb-bc4hb\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.773737 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:02 crc 
kubenswrapper[4909]: I1128 16:33:02.775586 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.779498 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.781603 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.781780 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.812800 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837619 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837744 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837766 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837803 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837857 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.837880 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zl9h\" (UniqueName: \"kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.839559 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 
16:33:02.840877 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.841454 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.842080 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.842952 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.933535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zl9h\" (UniqueName: \"kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h\") pod \"dnsmasq-dns-6bb4fc677f-r4b99\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.939882 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.939932 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.939965 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.939989 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.940035 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.940053 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.940085 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.940100 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n42nt\" (UniqueName: \"kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.940120 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:02 crc kubenswrapper[4909]: I1128 16:33:02.992841 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.002918 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.012471 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041774 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041805 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041932 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.041957 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.042028 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.042046 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n42nt\" (UniqueName: \"kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.042100 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.042591 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs\") pod \"cinder-api-0\" (UID: 
\"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.043192 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.046812 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.046950 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.048560 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.051569 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.052207 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.052701 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.072694 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n42nt\" (UniqueName: \"kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt\") pod \"cinder-api-0\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.142706 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.142757 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 
16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.142831 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.142958 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lvn8\" (UniqueName: \"kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.143076 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.143148 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0\") pod \"b5156fb2-a147-4c44-95eb-8d54163395bc\" (UID: \"b5156fb2-a147-4c44-95eb-8d54163395bc\") " Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.156806 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8" (OuterVolumeSpecName: "kube-api-access-6lvn8") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "kube-api-access-6lvn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.196908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config" (OuterVolumeSpecName: "config") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.206381 4909 generic.go:334] "Generic (PLEG): container finished" podID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerID="ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03" exitCode=0 Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.206762 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" event={"ID":"b5156fb2-a147-4c44-95eb-8d54163395bc","Type":"ContainerDied","Data":"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03"} Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.206819 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" event={"ID":"b5156fb2-a147-4c44-95eb-8d54163395bc","Type":"ContainerDied","Data":"38313a32d4f20fe07d11ae4c2ed402a7ebf4a88b408352700a8dee6e876311d0"} Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.206836 4909 scope.go:117] "RemoveContainer" containerID="ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.207038 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-6jwmr" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.228432 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.231686 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.232126 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.243567 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.244904 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.244996 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.245054 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.245114 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lvn8\" (UniqueName: \"kubernetes.io/projected/b5156fb2-a147-4c44-95eb-8d54163395bc-kube-api-access-6lvn8\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.245168 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.260123 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b5156fb2-a147-4c44-95eb-8d54163395bc" (UID: "b5156fb2-a147-4c44-95eb-8d54163395bc"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.301231 4909 scope.go:117] "RemoveContainer" containerID="b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.347991 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5156fb2-a147-4c44-95eb-8d54163395bc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.357962 4909 scope.go:117] "RemoveContainer" containerID="ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03" Nov 28 16:33:03 crc kubenswrapper[4909]: E1128 16:33:03.359854 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03\": container with ID starting with ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03 not found: ID does not exist" containerID="ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.359896 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03"} err="failed to get container status \"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03\": rpc error: code = NotFound desc = could not find container \"ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03\": container with ID starting with ef2b25a6f860fffb4b80e24aa34060976930ca33ec549bcfa5297c0f1c93ea03 not found: ID does not exist" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.359922 4909 scope.go:117] "RemoveContainer" containerID="b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7" Nov 28 16:33:03 crc kubenswrapper[4909]: E1128 16:33:03.360481 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7\": container with ID starting with b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7 not found: ID does not exist" containerID="b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.360530 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7"} err="failed to get container status \"b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7\": rpc error: code = NotFound desc = could not find container \"b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7\": container with ID starting with b11c8afe3d763b3fe490dc8a95028d55d3588631aaea6425626c322fac3f29b7 not found: ID does not exist" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.552348 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"] Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.561006 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-6jwmr"] Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.733642 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:03 crc kubenswrapper[4909]: W1128 16:33:03.735901 4909 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3c35b84_87e7_42d1_bbd6_5f72f38ab989.slice/crio-90b7681731b09ce06f00b98cfefec0287043357e8ff8ccffd96d79c81aee87ce WatchSource:0}: Error finding container 90b7681731b09ce06f00b98cfefec0287043357e8ff8ccffd96d79c81aee87ce: Status 404 returned error can't find the container with id 90b7681731b09ce06f00b98cfefec0287043357e8ff8ccffd96d79c81aee87ce Nov 28 16:33:03 crc kubenswrapper[4909]: W1128 16:33:03.739823 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96ca616f_e86a_4274_8bef_da335a23e12c.slice/crio-d56ebfd2c561b84e64bd05e2dca3f294a89009e11c1f056777865d5a9b793367 WatchSource:0}: Error finding container d56ebfd2c561b84e64bd05e2dca3f294a89009e11c1f056777865d5a9b793367: Status 404 returned error can't find the container with id d56ebfd2c561b84e64bd05e2dca3f294a89009e11c1f056777865d5a9b793367 Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.749820 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.832171 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.926867 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" path="/var/lib/kubelet/pods/b5156fb2-a147-4c44-95eb-8d54163395bc/volumes" Nov 28 16:33:03 crc kubenswrapper[4909]: I1128 16:33:03.929926 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3006e8a-91fc-4f9f-8499-bc0e0169ef9b" path="/var/lib/kubelet/pods/d3006e8a-91fc-4f9f-8499-bc0e0169ef9b/volumes" Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.223361 4909 generic.go:334] "Generic (PLEG): container finished" podID="96ca616f-e86a-4274-8bef-da335a23e12c" containerID="7502fa8c34f56280ee4bedf0f0fe8c08cfe511501d8ba742a03861959af17b0a" exitCode=0 Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.223468 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" event={"ID":"96ca616f-e86a-4274-8bef-da335a23e12c","Type":"ContainerDied","Data":"7502fa8c34f56280ee4bedf0f0fe8c08cfe511501d8ba742a03861959af17b0a"} Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.223864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" event={"ID":"96ca616f-e86a-4274-8bef-da335a23e12c","Type":"ContainerStarted","Data":"d56ebfd2c561b84e64bd05e2dca3f294a89009e11c1f056777865d5a9b793367"} Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.232619 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerStarted","Data":"107a3bcbe7139b74f69622a738afc6a71e6d7f6d4189f9d3eb0043869232e2a0"} Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.237921 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerStarted","Data":"517e56bc76234fa79f62e03acb5bdc533b095335447d92cbe971f721fddb1d41"} Nov 28 16:33:04 crc kubenswrapper[4909]: I1128 16:33:04.237960 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" 
event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerStarted","Data":"90b7681731b09ce06f00b98cfefec0287043357e8ff8ccffd96d79c81aee87ce"} Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.085311 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"] Nov 28 16:33:05 crc kubenswrapper[4909]: E1128 16:33:05.086025 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="init" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.086044 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="init" Nov 28 16:33:05 crc kubenswrapper[4909]: E1128 16:33:05.086060 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="dnsmasq-dns" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.086067 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="dnsmasq-dns" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.086257 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5156fb2-a147-4c44-95eb-8d54163395bc" containerName="dnsmasq-dns" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.087157 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.090793 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.091010 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.116325 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"] Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.180998 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.181135 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.181200 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.181237 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 
crc kubenswrapper[4909]: I1128 16:33:05.181282 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.181311 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5t7lp\" (UniqueName: \"kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.181403 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.277374 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerStarted","Data":"d938862fe9fc3e6327eed52ecb437574cdd14b5fddf79ca390b9bf6e50d98375"} Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.281472 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerStarted","Data":"41a9bd70bd75a09405e44da59f77c9160795724e7ab52b06e2ba83b64804be3d"} Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.281633 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.282813 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t7lp\" (UniqueName: \"kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.282872 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.282942 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.282971 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.283015 
4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.283049 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.283074 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.288761 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.298853 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.301444 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.301807 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" event={"ID":"96ca616f-e86a-4274-8bef-da335a23e12c","Type":"ContainerStarted","Data":"ad4aa083ac971ebecec5bf41839833e063ceb2c80a55ad2e0be49f895214262f"} Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.301877 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.302026 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.302311 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.314363 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5t7lp\" (UniqueName: \"kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.322993 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config\") pod \"neutron-55f6d745d5-tgbm7\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") " pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.326305 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-674458cfcb-bc4hb" podStartSLOduration=3.326288826 podStartE2EDuration="3.326288826s" podCreationTimestamp="2025-11-28 16:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:05.321014145 +0000 UTC m=+1367.717698669" watchObservedRunningTime="2025-11-28 16:33:05.326288826 +0000 UTC m=+1367.722973350" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.346589 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" podStartSLOduration=3.346570631 podStartE2EDuration="3.346570631s" podCreationTimestamp="2025-11-28 16:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:05.345170583 +0000 UTC m=+1367.741855107" watchObservedRunningTime="2025-11-28 16:33:05.346570631 +0000 UTC m=+1367.743255155" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.420988 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.965123 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"] Nov 28 16:33:05 crc kubenswrapper[4909]: I1128 16:33:05.965554 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.026783 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.229125 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.319018 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerStarted","Data":"84ef1b544276823c5c91a1406dc17348087fabba68d10b1561cec7a3a87c25bd"} Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.319059 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerStarted","Data":"a557cc9d180d0683f4fde9bb969d28af2616171cadbdd8f48218d6d2c12dd589"} Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.321153 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="cinder-scheduler" containerID="cri-o://726d1b915da1a9defb392ec7e21dada8455bcc44349949a64ffdf8368abaf563" gracePeriod=30 Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.321936 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerStarted","Data":"29762398aa81300aa7e6fa97b5acccc7e5d16e4234ca8d5ea87d42654450084b"} Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.322017 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="probe" containerID="cri-o://e3a22895d25b271db66c975ba2e0216b64c28fd1e04142dbb967340dd7b6256e" gracePeriod=30 Nov 28 16:33:06 crc kubenswrapper[4909]: I1128 16:33:06.351898 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.351873211 podStartE2EDuration="4.351873211s" podCreationTimestamp="2025-11-28 16:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:06.345324465 +0000 UTC m=+1368.742009009" watchObservedRunningTime="2025-11-28 16:33:06.351873211 +0000 UTC m=+1368.748557735" Nov 28 16:33:07 crc kubenswrapper[4909]: I1128 16:33:07.331952 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerStarted","Data":"a3fd76fee056f26d16128b3c7dd903dc417db926bafd7b4cc42bf63262cd356c"} Nov 28 16:33:07 crc kubenswrapper[4909]: I1128 16:33:07.332502 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 16:33:07 crc kubenswrapper[4909]: I1128 16:33:07.358481 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-55f6d745d5-tgbm7" 
podStartSLOduration=2.358459916 podStartE2EDuration="2.358459916s" podCreationTimestamp="2025-11-28 16:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:07.355964279 +0000 UTC m=+1369.752648813" watchObservedRunningTime="2025-11-28 16:33:07.358459916 +0000 UTC m=+1369.755144450" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.191031 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.192511 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.195864 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.196009 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-ww85k" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.198989 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.214533 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.341776 4909 generic.go:334] "Generic (PLEG): container finished" podID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerID="e3a22895d25b271db66c975ba2e0216b64c28fd1e04142dbb967340dd7b6256e" exitCode=0 Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.341851 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerDied","Data":"e3a22895d25b271db66c975ba2e0216b64c28fd1e04142dbb967340dd7b6256e"} Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.342003 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.348773 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p4q5\" (UniqueName: \"kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.348951 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.349164 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.349240 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.451002 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.451500 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.451671 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.451871 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.452008 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p4q5\" (UniqueName: \"kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.457245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.464116 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.474851 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p4q5\" (UniqueName: \"kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5\") pod \"openstackclient\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " pod="openstack/openstackclient" Nov 28 16:33:08 crc kubenswrapper[4909]: I1128 16:33:08.511378 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 16:33:09 crc kubenswrapper[4909]: I1128 16:33:09.026550 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:09 crc kubenswrapper[4909]: I1128 16:33:09.352354 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae","Type":"ContainerStarted","Data":"b3c658b48488035ead1974a51c94ed80db361dcbd60efe47bc1d109ed9664288"} Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.426923 4909 generic.go:334] "Generic (PLEG): container finished" podID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerID="726d1b915da1a9defb392ec7e21dada8455bcc44349949a64ffdf8368abaf563" exitCode=0 Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.427258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerDied","Data":"726d1b915da1a9defb392ec7e21dada8455bcc44349949a64ffdf8368abaf563"} Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.691331 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.698321 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832083 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832322 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832378 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832433 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfgpl\" (UniqueName: \"kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832486 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832533 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832572 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.832612 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data\") pod \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\" (UID: \"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3\") " Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.833098 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.849019 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.864417 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts" (OuterVolumeSpecName: "scripts") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.865082 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl" (OuterVolumeSpecName: "kube-api-access-wfgpl") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "kube-api-access-wfgpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.935440 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.935479 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfgpl\" (UniqueName: \"kubernetes.io/projected/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-kube-api-access-wfgpl\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.935495 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.965385 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data" (OuterVolumeSpecName: "config-data") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:11 crc kubenswrapper[4909]: I1128 16:33:11.990198 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" (UID: "f7b661e6-2c60-4a0c-a36b-87a3b664c9e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.038230 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.038440 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.449480 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f7b661e6-2c60-4a0c-a36b-87a3b664c9e3","Type":"ContainerDied","Data":"2904ec4da16cff43ed601e190040b1e1c71ab971dbf2b91d01dd2c7791a3f902"} Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.449517 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.449550 4909 scope.go:117] "RemoveContainer" containerID="e3a22895d25b271db66c975ba2e0216b64c28fd1e04142dbb967340dd7b6256e" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.487076 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.520720 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.523820 4909 scope.go:117] "RemoveContainer" containerID="726d1b915da1a9defb392ec7e21dada8455bcc44349949a64ffdf8368abaf563" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.531994 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:12 crc kubenswrapper[4909]: E1128 16:33:12.532377 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="probe" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.532395 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="probe" Nov 28 16:33:12 crc kubenswrapper[4909]: E1128 16:33:12.532409 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="cinder-scheduler" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.532415 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="cinder-scheduler" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.532595 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="cinder-scheduler" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.532628 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" containerName="probe" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 
16:33:12.533539 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.540253 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.546254 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650132 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650186 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650213 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650438 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4sst\" (UniqueName: \"kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.650619 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.740853 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752318 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4sst\" (UniqueName: \"kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752370 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752434 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752527 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752536 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752599 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.752628 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.760245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.760290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.761744 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.771763 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4sst\" (UniqueName: \"kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.773235 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.866857 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:12 crc kubenswrapper[4909]: I1128 16:33:12.937593 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.014787 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.057639 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"] Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.057965 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-c6688d798-rv8rb" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api" containerID="cri-o://265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022" gracePeriod=30 Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.058125 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-c6688d798-rv8rb" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api-log" containerID="cri-o://429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c" gracePeriod=30 Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.111620 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.113292 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="dnsmasq-dns" containerID="cri-o://8be88a5c79574dd45b097de4f349fe3c1ef09fd8b0795929171fcf223c404f1f" gracePeriod=10 Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.470858 4909 generic.go:334] "Generic (PLEG): container finished" podID="e525be76-a5cf-472c-91b6-c602362d9877" containerID="8be88a5c79574dd45b097de4f349fe3c1ef09fd8b0795929171fcf223c404f1f" exitCode=0 Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.470943 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" event={"ID":"e525be76-a5cf-472c-91b6-c602362d9877","Type":"ContainerDied","Data":"8be88a5c79574dd45b097de4f349fe3c1ef09fd8b0795929171fcf223c404f1f"} Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.513315 4909 generic.go:334] "Generic (PLEG): container finished" podID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerID="429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c" exitCode=143 Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.513425 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerDied","Data":"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c"} Nov 28 16:33:13 crc kubenswrapper[4909]: I1128 16:33:13.582989 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:13 crc 
kubenswrapper[4909]: I1128 16:33:13.920355 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b661e6-2c60-4a0c-a36b-87a3b664c9e3" path="/var/lib/kubelet/pods/f7b661e6-2c60-4a0c-a36b-87a3b664c9e3/volumes" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.347603 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403098 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403266 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403494 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403527 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403565 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.403592 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zl8l\" (UniqueName: \"kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l\") pod \"e525be76-a5cf-472c-91b6-c602362d9877\" (UID: \"e525be76-a5cf-472c-91b6-c602362d9877\") " Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.412177 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l" (OuterVolumeSpecName: "kube-api-access-9zl8l") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "kube-api-access-9zl8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.490320 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.506064 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zl8l\" (UniqueName: \"kubernetes.io/projected/e525be76-a5cf-472c-91b6-c602362d9877-kube-api-access-9zl8l\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.506103 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.514105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.537465 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerStarted","Data":"bd4409c68bbe1e59ec8c444df21ccbe33a05a6b3a3351c40e67fed585b228d63"} Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.549983 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.554297 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config" (OuterVolumeSpecName: "config") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.562538 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e525be76-a5cf-472c-91b6-c602362d9877" (UID: "e525be76-a5cf-472c-91b6-c602362d9877"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.562782 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" event={"ID":"e525be76-a5cf-472c-91b6-c602362d9877","Type":"ContainerDied","Data":"3038956ff3574df85c351690e9740a8f4541bd8fb4d36295f5c8c3b1cdf62705"} Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.562854 4909 scope.go:117] "RemoveContainer" containerID="8be88a5c79574dd45b097de4f349fe3c1ef09fd8b0795929171fcf223c404f1f" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.562944 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-zlrjj" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.607603 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.607640 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.607668 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.607677 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e525be76-a5cf-472c-91b6-c602362d9877-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.611494 4909 scope.go:117] "RemoveContainer" containerID="afc7960de6d7d1c58beb323f82e507631698c08c9bf3d0502de2ef29f40a95c7" Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.616853 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:33:14 crc kubenswrapper[4909]: I1128 16:33:14.626773 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-zlrjj"] Nov 28 16:33:15 crc kubenswrapper[4909]: I1128 16:33:15.579054 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerStarted","Data":"498c5080ce90e9a7105e201c315e2156da6b516e7e542f7334041b20bfa59f28"} Nov 28 16:33:15 crc kubenswrapper[4909]: I1128 16:33:15.913182 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e525be76-a5cf-472c-91b6-c602362d9877" path="/var/lib/kubelet/pods/e525be76-a5cf-472c-91b6-c602362d9877/volumes" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.612924 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerStarted","Data":"6965ce3a18191ccba9ccd72339cd48bef0713e889950e45db1e00d6f157854c1"} Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.642327 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.642307086 podStartE2EDuration="4.642307086s" podCreationTimestamp="2025-11-28 16:33:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:16.632518463 +0000 UTC m=+1379.029202987" watchObservedRunningTime="2025-11-28 16:33:16.642307086 +0000 UTC m=+1379.038991620" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.672879 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-c6688d798-rv8rb" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.152:9311/healthcheck\": read tcp 10.217.0.2:51326->10.217.0.152:9311: read: connection reset by peer" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.673142 4909 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/barbican-api-c6688d798-rv8rb" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.152:9311/healthcheck\": read tcp 10.217.0.2:51328->10.217.0.152:9311: read: connection reset by peer" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.855429 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:33:16 crc kubenswrapper[4909]: E1128 16:33:16.855836 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="dnsmasq-dns" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.855854 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="dnsmasq-dns" Nov 28 16:33:16 crc kubenswrapper[4909]: E1128 16:33:16.855883 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="init" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.855888 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="init" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.856072 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e525be76-a5cf-472c-91b6-c602362d9877" containerName="dnsmasq-dns" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.863875 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.868217 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.868592 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.868778 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.892712 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950274 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950325 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc5q8\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950355 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 
16:33:16.950628 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950764 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950812 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950842 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.950895 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:16 crc kubenswrapper[4909]: I1128 16:33:16.959889 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.052495 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc5q8\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.052886 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053012 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053078 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053114 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053142 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053181 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.053292 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.054236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.055578 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.062954 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.063883 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.064170 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: 
\"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.065025 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.068307 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.096522 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc5q8\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8\") pod \"swift-proxy-589b6f8979-wbls8\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") " pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.186262 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.203055 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c6688d798-rv8rb" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.262892 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle\") pod \"017980d9-7e8a-48d2-af4c-9251526eeb2b\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.263144 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom\") pod \"017980d9-7e8a-48d2-af4c-9251526eeb2b\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.263348 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs\") pod \"017980d9-7e8a-48d2-af4c-9251526eeb2b\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.263503 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data\") pod \"017980d9-7e8a-48d2-af4c-9251526eeb2b\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.263723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d5hb\" (UniqueName: \"kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb\") pod \"017980d9-7e8a-48d2-af4c-9251526eeb2b\" (UID: \"017980d9-7e8a-48d2-af4c-9251526eeb2b\") " Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.267033 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs" (OuterVolumeSpecName: "logs") pod "017980d9-7e8a-48d2-af4c-9251526eeb2b" (UID: "017980d9-7e8a-48d2-af4c-9251526eeb2b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.271479 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb" (OuterVolumeSpecName: "kube-api-access-4d5hb") pod "017980d9-7e8a-48d2-af4c-9251526eeb2b" (UID: "017980d9-7e8a-48d2-af4c-9251526eeb2b"). InnerVolumeSpecName "kube-api-access-4d5hb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.271732 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "017980d9-7e8a-48d2-af4c-9251526eeb2b" (UID: "017980d9-7e8a-48d2-af4c-9251526eeb2b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.338839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "017980d9-7e8a-48d2-af4c-9251526eeb2b" (UID: "017980d9-7e8a-48d2-af4c-9251526eeb2b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.369890 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d5hb\" (UniqueName: \"kubernetes.io/projected/017980d9-7e8a-48d2-af4c-9251526eeb2b-kube-api-access-4d5hb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.369918 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.369931 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.369939 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/017980d9-7e8a-48d2-af4c-9251526eeb2b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.375875 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data" (OuterVolumeSpecName: "config-data") pod "017980d9-7e8a-48d2-af4c-9251526eeb2b" (UID: "017980d9-7e8a-48d2-af4c-9251526eeb2b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.475880 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/017980d9-7e8a-48d2-af4c-9251526eeb2b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.638338 4909 generic.go:334] "Generic (PLEG): container finished" podID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerID="265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022" exitCode=0 Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.639223 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c6688d798-rv8rb" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.639544 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerDied","Data":"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022"} Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.639586 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6688d798-rv8rb" event={"ID":"017980d9-7e8a-48d2-af4c-9251526eeb2b","Type":"ContainerDied","Data":"5648aae709d89fd73a2b7f886742c695b3472bdd16413a0fd9ece2ecb0ac06ef"} Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.639616 4909 scope.go:117] "RemoveContainer" containerID="265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.711081 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"] Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.728292 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-c6688d798-rv8rb"] Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.741148 4909 scope.go:117] "RemoveContainer" containerID="429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.762737 4909 scope.go:117] "RemoveContainer" containerID="265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022" Nov 28 16:33:17 crc kubenswrapper[4909]: E1128 16:33:17.763442 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022\": container with ID starting with 265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022 not found: ID does not exist" containerID="265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.763549 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022"} err="failed to get container status \"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022\": rpc error: code = NotFound desc = could not find container \"265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022\": container with ID starting with 265365f3053ad5e8490ed5d37cd247f5274927934a22d34f4ecfa26d70eb2022 not found: ID does not exist" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.763631 4909 scope.go:117] "RemoveContainer" containerID="429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c" Nov 28 16:33:17 crc kubenswrapper[4909]: E1128 16:33:17.764423 4909 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c\": container with ID starting with 429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c not found: ID does not exist" containerID="429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.764465 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c"} err="failed to get container status \"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c\": rpc error: code = NotFound desc = could not find container \"429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c\": container with ID starting with 429dd0fa8094935415ce88e1ed5b0f0c5314da9990aed267bba5db2691fca85c not found: ID does not exist" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.867967 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.915032 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" path="/var/lib/kubelet/pods/017980d9-7e8a-48d2-af4c-9251526eeb2b/volumes" Nov 28 16:33:17 crc kubenswrapper[4909]: I1128 16:33:17.939981 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:33:17 crc kubenswrapper[4909]: W1128 16:33:17.969939 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode792abf7_967c_4293_b5f4_f073b07c8cf1.slice/crio-43391a917392eb96d2e2e8a7d61fdefd88f801c99a389ac0950971d172990617 WatchSource:0}: Error finding container 43391a917392eb96d2e2e8a7d61fdefd88f801c99a389ac0950971d172990617: Status 404 returned error can't find the container with id 43391a917392eb96d2e2e8a7d61fdefd88f801c99a389ac0950971d172990617 Nov 28 16:33:18 crc kubenswrapper[4909]: I1128 16:33:18.648897 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerStarted","Data":"43391a917392eb96d2e2e8a7d61fdefd88f801c99a389ac0950971d172990617"} Nov 28 16:33:19 crc kubenswrapper[4909]: I1128 16:33:19.509054 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 16:33:19 crc kubenswrapper[4909]: I1128 16:33:19.663815 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerStarted","Data":"1be6a34dec3cfc7a9c5a2a82788430cdb9b7ee059f8aeacb143350b3dd68f3c7"} Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.106211 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.357299 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-5hd6v"] Nov 28 16:33:23 crc kubenswrapper[4909]: E1128 16:33:23.357965 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" 
containerName="barbican-api-log" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.357982 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api-log" Nov 28 16:33:23 crc kubenswrapper[4909]: E1128 16:33:23.358001 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.358007 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.358183 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api-log" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.358197 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="017980d9-7e8a-48d2-af4c-9251526eeb2b" containerName="barbican-api" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.358737 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.370965 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5hd6v"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.446785 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-chks5"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.452782 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.468853 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-ab11-account-create-update-shpl9"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.470447 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.472814 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.477804 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-chks5"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.485635 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-ab11-account-create-update-shpl9"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.519786 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.519853 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g62lw\" (UniqueName: \"kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.572025 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-92jzz"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.573220 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.583163 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-92jzz"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.591286 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.591527 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-log" containerID="cri-o://93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88" gracePeriod=30 Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.591704 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-httpd" containerID="cri-o://ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf" gracePeriod=30 Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621650 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plgbd\" (UniqueName: \"kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621737 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts\") pod \"nova-cell0-db-create-chks5\" 
(UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621794 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621881 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5hrb\" (UniqueName: \"kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb\") pod \"nova-cell0-db-create-chks5\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.621923 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g62lw\" (UniqueName: \"kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.623146 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.676494 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g62lw\" (UniqueName: \"kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw\") pod \"nova-api-db-create-5hd6v\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.704465 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-77a6-account-create-update-pq4lq"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.720983 4909 generic.go:334] "Generic (PLEG): container finished" podID="403021b8-fd7a-4823-9f99-622829f4d935" containerID="cdf71b20d3f9d2593d7f4c8cb7dd45494865ea0ae01c47186ea6e9b07989d017" exitCode=137 Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725099 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725192 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plgbd\" (UniqueName: 
\"kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725235 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts\") pod \"nova-cell0-db-create-chks5\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725295 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725320 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmxqp\" (UniqueName: \"kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.725384 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5hrb\" (UniqueName: \"kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb\") pod \"nova-cell0-db-create-chks5\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.727427 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-77a6-account-create-update-pq4lq"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.727463 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerDied","Data":"cdf71b20d3f9d2593d7f4c8cb7dd45494865ea0ae01c47186ea6e9b07989d017"} Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.727601 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.730197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts\") pod \"nova-cell0-db-create-chks5\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.730578 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.731521 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.755776 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5hrb\" (UniqueName: \"kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb\") pod \"nova-cell0-db-create-chks5\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.759186 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plgbd\" (UniqueName: \"kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd\") pod \"nova-api-ab11-account-create-update-shpl9\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.769259 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.792836 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.829200 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.829280 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.829314 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmxqp\" (UniqueName: \"kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.829335 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7s6f\" (UniqueName: \"kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.830259 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.853771 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmxqp\" (UniqueName: \"kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp\") pod \"nova-cell1-db-create-92jzz\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.858481 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-e892-account-create-update-rwgsj"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.859591 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.862245 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.877533 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e892-account-create-update-rwgsj"] Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.934813 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7s6f\" (UniqueName: \"kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.935039 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.935871 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.945131 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.968292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7s6f\" (UniqueName: \"kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f\") pod \"nova-cell0-77a6-account-create-update-pq4lq\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:23 crc kubenswrapper[4909]: I1128 16:33:23.974448 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.036229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.036444 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xt55\" (UniqueName: \"kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.137932 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.138095 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xt55\" (UniqueName: \"kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.138752 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.143493 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.159283 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xt55\" (UniqueName: \"kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55\") pod \"nova-cell1-e892-account-create-update-rwgsj\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.223287 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.740263 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.740496 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-log" containerID="cri-o://ddaf1e784f7ded7f9c54a05ec17a7bae8623c22eea8fc597422047afc91451a0" gracePeriod=30 Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.740626 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-httpd" containerID="cri-o://d40a4b6cfcdb1ace4ed94b638cf6d1b874b6ed7fea6b7915423222d760158ab8" gracePeriod=30 Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.747774 4909 generic.go:334] "Generic (PLEG): container finished" podID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerID="93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88" exitCode=143 Nov 28 16:33:24 crc kubenswrapper[4909]: I1128 16:33:24.747831 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerDied","Data":"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.318280 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.476807 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.476861 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.476887 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt7dc\" (UniqueName: \"kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.476919 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.476955 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.477021 4909 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.477064 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml\") pod \"403021b8-fd7a-4823-9f99-622829f4d935\" (UID: \"403021b8-fd7a-4823-9f99-622829f4d935\") " Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.478004 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.485927 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.494641 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.500270 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts" (OuterVolumeSpecName: "scripts") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.505782 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc" (OuterVolumeSpecName: "kube-api-access-kt7dc") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "kube-api-access-kt7dc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.578709 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.578898 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt7dc\" (UniqueName: \"kubernetes.io/projected/403021b8-fd7a-4823-9f99-622829f4d935-kube-api-access-kt7dc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.578957 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.579009 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/403021b8-fd7a-4823-9f99-622829f4d935-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.579083 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.598329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.648869 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data" (OuterVolumeSpecName: "config-data") pod "403021b8-fd7a-4823-9f99-622829f4d935" (UID: "403021b8-fd7a-4823-9f99-622829f4d935"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.676829 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5hd6v"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.680320 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.680431 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/403021b8-fd7a-4823-9f99-622829f4d935-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.767515 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"403021b8-fd7a-4823-9f99-622829f4d935","Type":"ContainerDied","Data":"7b59dfb079d7a33b4b7baf4f3829af44a1b4d5b0e1f531562c57d9be26f51565"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.767576 4909 scope.go:117] "RemoveContainer" containerID="cdf71b20d3f9d2593d7f4c8cb7dd45494865ea0ae01c47186ea6e9b07989d017" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.767756 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.772983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae","Type":"ContainerStarted","Data":"15d6d7bb76cdcf47fdb12971445027a116e8951d0899335b43ee4f4fb9c7586a"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.778991 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerStarted","Data":"95eacaaf5af98b7623dbb633b828928620aa97b977799e32b9e1bc8948b35490"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.779414 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.779521 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.782725 4909 generic.go:334] "Generic (PLEG): container finished" podID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerID="ddaf1e784f7ded7f9c54a05ec17a7bae8623c22eea8fc597422047afc91451a0" exitCode=143 Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.782775 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerDied","Data":"ddaf1e784f7ded7f9c54a05ec17a7bae8623c22eea8fc597422047afc91451a0"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.784614 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5hd6v" event={"ID":"98978b85-3c69-4ae1-83cb-73f72e8d2093","Type":"ContainerStarted","Data":"60e824c91b3e9d5ce9b91037063372d58c921797508c607c6c0bedb652e94aef"} Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.788639 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.810154 4909 scope.go:117] "RemoveContainer" containerID="cfe793ee5a40fe2697a86b51f9d61105e2f440e85819826987d20a8cc47e8088" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.844707 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.804032273 podStartE2EDuration="17.844683467s" podCreationTimestamp="2025-11-28 16:33:08 +0000 UTC" firstStartedPulling="2025-11-28 16:33:09.037395411 +0000 UTC m=+1371.434079935" lastFinishedPulling="2025-11-28 16:33:25.078046605 +0000 UTC m=+1387.474731129" observedRunningTime="2025-11-28 16:33:25.837090243 +0000 UTC m=+1388.233774787" watchObservedRunningTime="2025-11-28 16:33:25.844683467 +0000 UTC m=+1388.241367991" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.853931 4909 scope.go:117] "RemoveContainer" containerID="e36ac30c46e63b922b526420bbd327b8c4c53bf4ecea562152d6d344150751dd" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.885517 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-92jzz"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.893988 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-77a6-account-create-update-pq4lq"] Nov 28 16:33:25 crc 
kubenswrapper[4909]: I1128 16:33:25.898751 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-589b6f8979-wbls8" podStartSLOduration=9.898731828 podStartE2EDuration="9.898731828s" podCreationTimestamp="2025-11-28 16:33:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:25.875055973 +0000 UTC m=+1388.271740497" watchObservedRunningTime="2025-11-28 16:33:25.898731828 +0000 UTC m=+1388.295416352" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.949840 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e892-account-create-update-rwgsj"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.950139 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-chks5"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.958478 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.976117 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.978677 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:25 crc kubenswrapper[4909]: E1128 16:33:25.979123 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-notification-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979146 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-notification-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: E1128 16:33:25.979172 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-central-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979181 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-central-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: E1128 16:33:25.979212 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="proxy-httpd" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979221 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="proxy-httpd" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979448 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-central-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979474 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="proxy-httpd" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.979494 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="403021b8-fd7a-4823-9f99-622829f4d935" containerName="ceilometer-notification-agent" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.988265 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.992329 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:33:25 crc kubenswrapper[4909]: I1128 16:33:25.992778 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001181 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001281 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001303 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001330 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-564lh\" (UniqueName: \"kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001401 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001461 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.001519 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.004942 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103798 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103838 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103860 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-564lh\" (UniqueName: \"kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103935 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.103961 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.104005 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.107195 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.107248 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.120697 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.120780 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.122439 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.123056 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-ab11-account-create-update-shpl9"] Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.126094 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.126683 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-564lh\" (UniqueName: \"kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh\") pod \"ceilometer-0\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: W1128 16:33:26.130938 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4cdba92_a257_403a_a47f_8678e1e63b84.slice/crio-e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67 WatchSource:0}: Error finding container e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67: Status 404 returned error can't find the container with id e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.200316 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.331115 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.795018 4909 generic.go:334] "Generic (PLEG): container finished" podID="650d0ac8-9d90-4bca-9d00-830345bf8e65" containerID="6e1fc2641917ff4d1fab4358ecd7104b0ea53db894cd04eda884fbd1c239f5a1" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.795109 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" event={"ID":"650d0ac8-9d90-4bca-9d00-830345bf8e65","Type":"ContainerDied","Data":"6e1fc2641917ff4d1fab4358ecd7104b0ea53db894cd04eda884fbd1c239f5a1"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.795455 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" event={"ID":"650d0ac8-9d90-4bca-9d00-830345bf8e65","Type":"ContainerStarted","Data":"f00ef96ca623b27a7d79ff48bf49cbe953cf518222bf31bc5d8f1f09d26a7358"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.797761 4909 generic.go:334] "Generic (PLEG): container finished" podID="a4cdba92-a257-403a-a47f-8678e1e63b84" containerID="bb569642f57e937293a08525ea86da1ca995760deb6efcdbd4112c1b2e6df5c2" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.797812 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ab11-account-create-update-shpl9" event={"ID":"a4cdba92-a257-403a-a47f-8678e1e63b84","Type":"ContainerDied","Data":"bb569642f57e937293a08525ea86da1ca995760deb6efcdbd4112c1b2e6df5c2"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.797836 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ab11-account-create-update-shpl9" event={"ID":"a4cdba92-a257-403a-a47f-8678e1e63b84","Type":"ContainerStarted","Data":"e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.799071 4909 generic.go:334] "Generic (PLEG): container finished" podID="f41afab6-2271-4347-97fd-15d60c351409" containerID="444bf66121973198387c53d2fe4d567588df490dfe1e0393da998e6a7e899b67" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.799101 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-chks5" event={"ID":"f41afab6-2271-4347-97fd-15d60c351409","Type":"ContainerDied","Data":"444bf66121973198387c53d2fe4d567588df490dfe1e0393da998e6a7e899b67"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.799128 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-chks5" event={"ID":"f41afab6-2271-4347-97fd-15d60c351409","Type":"ContainerStarted","Data":"7bbc9f04bd92421ee0092003ee7fff766bcaf163bd5c29a3e4f33f17f454707d"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.800119 4909 generic.go:334] "Generic (PLEG): container finished" podID="bf22088e-8ad0-4323-bca3-2b7bdb648bf9" containerID="123e4bf1e8a307c46d39779dbbb6fd173da51f3bf9f94d9b59703bfb90dc1d0c" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.800168 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" event={"ID":"bf22088e-8ad0-4323-bca3-2b7bdb648bf9","Type":"ContainerDied","Data":"123e4bf1e8a307c46d39779dbbb6fd173da51f3bf9f94d9b59703bfb90dc1d0c"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.800191 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" 
event={"ID":"bf22088e-8ad0-4323-bca3-2b7bdb648bf9","Type":"ContainerStarted","Data":"5d65741c4d142f5ed19dca29cac9a41499288c6ff81d6e8446b5ff96ee3d7678"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.801307 4909 generic.go:334] "Generic (PLEG): container finished" podID="98978b85-3c69-4ae1-83cb-73f72e8d2093" containerID="c5b2101613b15e354dda8e19429c85ff82a7aa110a1c2928c61644354ffa188c" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.801391 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5hd6v" event={"ID":"98978b85-3c69-4ae1-83cb-73f72e8d2093","Type":"ContainerDied","Data":"c5b2101613b15e354dda8e19429c85ff82a7aa110a1c2928c61644354ffa188c"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.804550 4909 generic.go:334] "Generic (PLEG): container finished" podID="6a22fca6-e32c-4df8-8240-d9c749c19261" containerID="440e32fab867b54123f7d40ba325b3066e9c3cfb206e853ad32cc65a3e064bd4" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.804594 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-92jzz" event={"ID":"6a22fca6-e32c-4df8-8240-d9c749c19261","Type":"ContainerDied","Data":"440e32fab867b54123f7d40ba325b3066e9c3cfb206e853ad32cc65a3e064bd4"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.804641 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-92jzz" event={"ID":"6a22fca6-e32c-4df8-8240-d9c749c19261","Type":"ContainerStarted","Data":"2b35ea8f1a764af3ca74e22e93f32805a1aff9668ad089fe74c934c6f5ed5f2e"} Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.812634 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 16:33:26 crc kubenswrapper[4909]: I1128 16:33:26.824750 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.196012 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.617233 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2nqz\" (UniqueName: \"kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739510 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739561 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739584 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739635 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739736 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.739775 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\" (UID: \"1ed2a549-d75f-4b1e-9d00-6eca4f86957f\") " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.741168 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.745050 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs" (OuterVolumeSpecName: "logs") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.749286 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz" (OuterVolumeSpecName: "kube-api-access-t2nqz") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "kube-api-access-t2nqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.756884 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.777912 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts" (OuterVolumeSpecName: "scripts") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.822007 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.836323 4909 generic.go:334] "Generic (PLEG): container finished" podID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerID="ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf" exitCode=0 Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.836575 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerDied","Data":"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf"} Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.836602 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.836642 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ed2a549-d75f-4b1e-9d00-6eca4f86957f","Type":"ContainerDied","Data":"f33fbb62f1f2b29260f0cba66028250bb06937ba450ea1cda7eb169b6dea3062"} Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.836692 4909 scope.go:117] "RemoveContainer" containerID="ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.841920 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.841960 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2nqz\" (UniqueName: \"kubernetes.io/projected/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-kube-api-access-t2nqz\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.841973 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.841988 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.842004 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.842019 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.844916 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerStarted","Data":"800423d80fa6fb28efaf88c09b053243c0b2c7dbb4787a6cb1e8c9a281d4aeb2"} Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.844965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerStarted","Data":"bb0c29e9a90a2fe9de00fd482b5e984c8eab44dca0c70d0a9c4e0af3d1789e3e"} Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.882197 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.914860 4909 scope.go:117] "RemoveContainer" containerID="93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.942190 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data" (OuterVolumeSpecName: "config-data") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.944487 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.944524 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.952092 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="403021b8-fd7a-4823-9f99-622829f4d935" path="/var/lib/kubelet/pods/403021b8-fd7a-4823-9f99-622829f4d935/volumes" Nov 28 16:33:27 crc kubenswrapper[4909]: I1128 16:33:27.953898 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1ed2a549-d75f-4b1e-9d00-6eca4f86957f" (UID: "1ed2a549-d75f-4b1e-9d00-6eca4f86957f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.047331 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ed2a549-d75f-4b1e-9d00-6eca4f86957f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.070449 4909 scope.go:117] "RemoveContainer" containerID="ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf" Nov 28 16:33:28 crc kubenswrapper[4909]: E1128 16:33:28.071012 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf\": container with ID starting with ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf not found: ID does not exist" containerID="ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.071045 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf"} err="failed to get container status \"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf\": rpc error: code = NotFound desc = could not find container \"ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf\": container with ID starting with ad28a714c25d128ef8edba41277bc5b8f460469ced5c8d03d0afd12a7a1547bf not found: ID does not exist" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.071235 4909 scope.go:117] "RemoveContainer" containerID="93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88" Nov 28 16:33:28 crc kubenswrapper[4909]: E1128 16:33:28.072303 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88\": container with ID starting with 93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88 not found: ID does not exist" containerID="93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.072334 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88"} err="failed to get container status \"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88\": rpc error: code = NotFound desc = could not find container \"93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88\": container with ID starting with 93b05c0d1f290aace4bb931df0cce5c19a4896a56bff38a462e5ca772902bb88 not found: ID does not exist" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.172154 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.190171 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.241811 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:28 crc kubenswrapper[4909]: E1128 16:33:28.248222 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-httpd" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.248255 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-httpd" Nov 28 16:33:28 crc kubenswrapper[4909]: E1128 16:33:28.248322 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-log" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.248337 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-log" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.249153 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-log" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.249192 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" containerName="glance-httpd" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.252949 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.269468 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.284310 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.284507 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.328614 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.364945 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z52sq\" (UniqueName: \"kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.364993 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365039 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365070 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365090 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365164 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.365193 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471287 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts\") pod 
\"6a22fca6-e32c-4df8-8240-d9c749c19261\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471357 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmxqp\" (UniqueName: \"kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp\") pod \"6a22fca6-e32c-4df8-8240-d9c749c19261\" (UID: \"6a22fca6-e32c-4df8-8240-d9c749c19261\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471720 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z52sq\" (UniqueName: \"kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471750 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471773 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471821 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471847 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471888 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471944 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.471977 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc 
kubenswrapper[4909]: I1128 16:33:28.473808 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.479310 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.480407 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6a22fca6-e32c-4df8-8240-d9c749c19261" (UID: "6a22fca6-e32c-4df8-8240-d9c749c19261"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.480877 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.481338 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.484452 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.485789 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.486910 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.497791 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp" (OuterVolumeSpecName: "kube-api-access-zmxqp") pod "6a22fca6-e32c-4df8-8240-d9c749c19261" (UID: "6a22fca6-e32c-4df8-8240-d9c749c19261"). InnerVolumeSpecName "kube-api-access-zmxqp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.517597 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z52sq\" (UniqueName: \"kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.573569 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a22fca6-e32c-4df8-8240-d9c749c19261-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.573874 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmxqp\" (UniqueName: \"kubernetes.io/projected/6a22fca6-e32c-4df8-8240-d9c749c19261-kube-api-access-zmxqp\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.596943 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.619099 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.658813 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.660307 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.787741 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts\") pod \"98978b85-3c69-4ae1-83cb-73f72e8d2093\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.787890 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts\") pod \"650d0ac8-9d90-4bca-9d00-830345bf8e65\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.787949 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7s6f\" (UniqueName: \"kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f\") pod \"650d0ac8-9d90-4bca-9d00-830345bf8e65\" (UID: \"650d0ac8-9d90-4bca-9d00-830345bf8e65\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.788056 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g62lw\" (UniqueName: \"kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw\") pod \"98978b85-3c69-4ae1-83cb-73f72e8d2093\" (UID: \"98978b85-3c69-4ae1-83cb-73f72e8d2093\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.788613 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "98978b85-3c69-4ae1-83cb-73f72e8d2093" (UID: "98978b85-3c69-4ae1-83cb-73f72e8d2093"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.788622 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "650d0ac8-9d90-4bca-9d00-830345bf8e65" (UID: "650d0ac8-9d90-4bca-9d00-830345bf8e65"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.795432 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw" (OuterVolumeSpecName: "kube-api-access-g62lw") pod "98978b85-3c69-4ae1-83cb-73f72e8d2093" (UID: "98978b85-3c69-4ae1-83cb-73f72e8d2093"). InnerVolumeSpecName "kube-api-access-g62lw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.797156 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f" (OuterVolumeSpecName: "kube-api-access-x7s6f") pod "650d0ac8-9d90-4bca-9d00-830345bf8e65" (UID: "650d0ac8-9d90-4bca-9d00-830345bf8e65"). InnerVolumeSpecName "kube-api-access-x7s6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.862582 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.869333 4909 generic.go:334] "Generic (PLEG): container finished" podID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerID="d40a4b6cfcdb1ace4ed94b638cf6d1b874b6ed7fea6b7915423222d760158ab8" exitCode=0 Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.869458 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerDied","Data":"d40a4b6cfcdb1ace4ed94b638cf6d1b874b6ed7fea6b7915423222d760158ab8"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.869493 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e9618898-4d20-4c0d-ab4d-1dc67e0f109a","Type":"ContainerDied","Data":"f650d12360f184251edc81adb3789ea75b73835fbcff32683fdc367eeb276ef9"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.869507 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f650d12360f184251edc81adb3789ea75b73835fbcff32683fdc367eeb276ef9" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.889934 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g62lw\" (UniqueName: \"kubernetes.io/projected/98978b85-3c69-4ae1-83cb-73f72e8d2093-kube-api-access-g62lw\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.889995 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98978b85-3c69-4ae1-83cb-73f72e8d2093-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.890005 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/650d0ac8-9d90-4bca-9d00-830345bf8e65-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.890014 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7s6f\" (UniqueName: \"kubernetes.io/projected/650d0ac8-9d90-4bca-9d00-830345bf8e65-kube-api-access-x7s6f\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.899142 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" event={"ID":"bf22088e-8ad0-4323-bca3-2b7bdb648bf9","Type":"ContainerDied","Data":"5d65741c4d142f5ed19dca29cac9a41499288c6ff81d6e8446b5ff96ee3d7678"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.899177 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d65741c4d142f5ed19dca29cac9a41499288c6ff81d6e8446b5ff96ee3d7678" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.909119 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.926809 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.927340 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5hd6v" event={"ID":"98978b85-3c69-4ae1-83cb-73f72e8d2093","Type":"ContainerDied","Data":"60e824c91b3e9d5ce9b91037063372d58c921797508c607c6c0bedb652e94aef"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.927377 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60e824c91b3e9d5ce9b91037063372d58c921797508c607c6c0bedb652e94aef" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.927412 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5hd6v" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.936300 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.936856 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-92jzz" event={"ID":"6a22fca6-e32c-4df8-8240-d9c749c19261","Type":"ContainerDied","Data":"2b35ea8f1a764af3ca74e22e93f32805a1aff9668ad089fe74c934c6f5ed5f2e"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.936890 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b35ea8f1a764af3ca74e22e93f32805a1aff9668ad089fe74c934c6f5ed5f2e" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.936967 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-92jzz" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.958426 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerStarted","Data":"2a1f0639baeefb1607f256503f16c79ecbfd0b0c79a2d3012c1d27ae41d1a1e6"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.969040 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-chks5" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.969587 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-chks5" event={"ID":"f41afab6-2271-4347-97fd-15d60c351409","Type":"ContainerDied","Data":"7bbc9f04bd92421ee0092003ee7fff766bcaf163bd5c29a3e4f33f17f454707d"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.969612 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bbc9f04bd92421ee0092003ee7fff766bcaf163bd5c29a3e4f33f17f454707d" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.977522 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" event={"ID":"650d0ac8-9d90-4bca-9d00-830345bf8e65","Type":"ContainerDied","Data":"f00ef96ca623b27a7d79ff48bf49cbe953cf518222bf31bc5d8f1f09d26a7358"} Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.977559 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f00ef96ca623b27a7d79ff48bf49cbe953cf518222bf31bc5d8f1f09d26a7358" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.977624 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-77a6-account-create-update-pq4lq" Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.990682 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5hrb\" (UniqueName: \"kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb\") pod \"f41afab6-2271-4347-97fd-15d60c351409\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.990936 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts\") pod \"a4cdba92-a257-403a-a47f-8678e1e63b84\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.990970 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plgbd\" (UniqueName: \"kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd\") pod \"a4cdba92-a257-403a-a47f-8678e1e63b84\" (UID: \"a4cdba92-a257-403a-a47f-8678e1e63b84\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.991055 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts\") pod \"f41afab6-2271-4347-97fd-15d60c351409\" (UID: \"f41afab6-2271-4347-97fd-15d60c351409\") " Nov 28 16:33:28 crc kubenswrapper[4909]: I1128 16:33:28.997226 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f41afab6-2271-4347-97fd-15d60c351409" (UID: "f41afab6-2271-4347-97fd-15d60c351409"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.004995 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4cdba92-a257-403a-a47f-8678e1e63b84" (UID: "a4cdba92-a257-403a-a47f-8678e1e63b84"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.011866 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd" (OuterVolumeSpecName: "kube-api-access-plgbd") pod "a4cdba92-a257-403a-a47f-8678e1e63b84" (UID: "a4cdba92-a257-403a-a47f-8678e1e63b84"). InnerVolumeSpecName "kube-api-access-plgbd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.011950 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ab11-account-create-update-shpl9" event={"ID":"a4cdba92-a257-403a-a47f-8678e1e63b84","Type":"ContainerDied","Data":"e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67"} Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.011995 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e568465824e6858cab55929b757276b1627faf58ab3a3df06c6f58b44e164b67" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.012195 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-ab11-account-create-update-shpl9" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.027903 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb" (OuterVolumeSpecName: "kube-api-access-t5hrb") pod "f41afab6-2271-4347-97fd-15d60c351409" (UID: "f41afab6-2271-4347-97fd-15d60c351409"). InnerVolumeSpecName "kube-api-access-t5hrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092387 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092452 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092469 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092494 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092564 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kb5vt\" (UniqueName: \"kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092616 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xt55\" (UniqueName: \"kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55\") pod \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092681 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092757 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts\") pod \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\" (UID: \"bf22088e-8ad0-4323-bca3-2b7bdb648bf9\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092792 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.092823 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data\") pod \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\" (UID: \"e9618898-4d20-4c0d-ab4d-1dc67e0f109a\") " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.093368 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4cdba92-a257-403a-a47f-8678e1e63b84-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.093382 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plgbd\" (UniqueName: \"kubernetes.io/projected/a4cdba92-a257-403a-a47f-8678e1e63b84-kube-api-access-plgbd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.093392 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f41afab6-2271-4347-97fd-15d60c351409-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.093400 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5hrb\" (UniqueName: \"kubernetes.io/projected/f41afab6-2271-4347-97fd-15d60c351409-kube-api-access-t5hrb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.095469 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf22088e-8ad0-4323-bca3-2b7bdb648bf9" (UID: "bf22088e-8ad0-4323-bca3-2b7bdb648bf9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.095529 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.095672 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs" (OuterVolumeSpecName: "logs") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.098079 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt" (OuterVolumeSpecName: "kube-api-access-kb5vt") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "kube-api-access-kb5vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.098244 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.104569 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts" (OuterVolumeSpecName: "scripts") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.104936 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55" (OuterVolumeSpecName: "kube-api-access-6xt55") pod "bf22088e-8ad0-4323-bca3-2b7bdb648bf9" (UID: "bf22088e-8ad0-4323-bca3-2b7bdb648bf9"). InnerVolumeSpecName "kube-api-access-6xt55". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.128247 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.187642 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.188291 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data" (OuterVolumeSpecName: "config-data") pod "e9618898-4d20-4c0d-ab4d-1dc67e0f109a" (UID: "e9618898-4d20-4c0d-ab4d-1dc67e0f109a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.194973 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195004 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195014 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195022 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195049 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195084 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195093 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195101 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kb5vt\" (UniqueName: \"kubernetes.io/projected/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-kube-api-access-kb5vt\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195127 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xt55\" (UniqueName: \"kubernetes.io/projected/bf22088e-8ad0-4323-bca3-2b7bdb648bf9-kube-api-access-6xt55\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.195137 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9618898-4d20-4c0d-ab4d-1dc67e0f109a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.227105 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.297327 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.485317 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:29 crc kubenswrapper[4909]: I1128 16:33:29.920279 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ed2a549-d75f-4b1e-9d00-6eca4f86957f" 
path="/var/lib/kubelet/pods/1ed2a549-d75f-4b1e-9d00-6eca4f86957f/volumes" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.025483 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerStarted","Data":"81db4557742d314ae925fe733a89a4e9d28890b8b23a13a13e3d329164b79e39"} Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.027585 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e892-account-create-update-rwgsj" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.028286 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerStarted","Data":"68a0915ee2dd3acb65a87dd91cc9a841f68c3241c8362deb5a14bb722ea6a2b9"} Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.028342 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.054608 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.069023 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.082554 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083066 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-log" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083105 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-log" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083130 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41afab6-2271-4347-97fd-15d60c351409" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083138 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41afab6-2271-4347-97fd-15d60c351409" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083153 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650d0ac8-9d90-4bca-9d00-830345bf8e65" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083162 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="650d0ac8-9d90-4bca-9d00-830345bf8e65" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083177 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf22088e-8ad0-4323-bca3-2b7bdb648bf9" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083185 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf22088e-8ad0-4323-bca3-2b7bdb648bf9" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083195 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4cdba92-a257-403a-a47f-8678e1e63b84" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083202 4909 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a4cdba92-a257-403a-a47f-8678e1e63b84" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083227 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a22fca6-e32c-4df8-8240-d9c749c19261" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083235 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a22fca6-e32c-4df8-8240-d9c749c19261" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083259 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98978b85-3c69-4ae1-83cb-73f72e8d2093" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083267 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="98978b85-3c69-4ae1-83cb-73f72e8d2093" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: E1128 16:33:30.083281 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-httpd" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083289 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-httpd" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083558 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a22fca6-e32c-4df8-8240-d9c749c19261" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083573 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4cdba92-a257-403a-a47f-8678e1e63b84" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083586 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41afab6-2271-4347-97fd-15d60c351409" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083593 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="98978b85-3c69-4ae1-83cb-73f72e8d2093" containerName="mariadb-database-create" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083611 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-httpd" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083631 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf22088e-8ad0-4323-bca3-2b7bdb648bf9" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083643 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="650d0ac8-9d90-4bca-9d00-830345bf8e65" containerName="mariadb-account-create-update" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.083676 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" containerName="glance-log" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.084867 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.088329 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.088539 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.109572 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.222461 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.222853 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223087 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hj7x\" (UniqueName: \"kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223217 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223361 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223485 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.223711 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325198 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325331 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325388 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325455 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hj7x\" (UniqueName: \"kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325485 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325509 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325533 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.325583 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.326282 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.326430 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.326429 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.331230 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.331773 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.332119 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.333734 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.343868 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hj7x\" (UniqueName: \"kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.359513 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:30 crc kubenswrapper[4909]: I1128 16:33:30.404305 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:31 crc kubenswrapper[4909]: I1128 16:33:31.040180 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerStarted","Data":"30fbad84f804b7ada9da16d8ee037dd6c5bb06b55551d23a9f96ea3c5222b69f"} Nov 28 16:33:31 crc kubenswrapper[4909]: I1128 16:33:31.040414 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerStarted","Data":"257ba16eb5dc11579d07b9316fe274af6b54797db9a8db896742e423617ab540"} Nov 28 16:33:31 crc kubenswrapper[4909]: I1128 16:33:31.069077 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.069062528 podStartE2EDuration="3.069062528s" podCreationTimestamp="2025-11-28 16:33:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:31.064912877 +0000 UTC m=+1393.461597391" watchObservedRunningTime="2025-11-28 16:33:31.069062528 +0000 UTC m=+1393.465747052" Nov 28 16:33:31 crc kubenswrapper[4909]: I1128 16:33:31.166688 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:31 crc kubenswrapper[4909]: I1128 16:33:31.947643 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9618898-4d20-4c0d-ab4d-1dc67e0f109a" path="/var/lib/kubelet/pods/e9618898-4d20-4c0d-ab4d-1dc67e0f109a/volumes" Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.072029 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerStarted","Data":"8e824f6bffec178bb7da3230b8cd5e6d04d4b558190b57ed85b3123d329f8ad9"} Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.072121 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.081782 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerStarted","Data":"0805f4bc86dd00471ce300a5735ca911ab4a9a41d60ea124ead1fd0e3fd4ccbe"} Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.081825 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerStarted","Data":"757942926ce5d1f49f6df9094a2b9c8153258d63791c59f907eb87a53c3b24b2"} Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.098998 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.539167219 podStartE2EDuration="7.098979709s" podCreationTimestamp="2025-11-28 16:33:25 +0000 UTC" firstStartedPulling="2025-11-28 16:33:26.865564555 +0000 UTC m=+1389.262249079" lastFinishedPulling="2025-11-28 16:33:31.425377045 +0000 UTC m=+1393.822061569" observedRunningTime="2025-11-28 16:33:32.094703925 +0000 UTC m=+1394.491388469" watchObservedRunningTime="2025-11-28 16:33:32.098979709 +0000 UTC m=+1394.495664233" Nov 28 16:33:32 crc kubenswrapper[4909]: I1128 16:33:32.199628 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/swift-proxy-589b6f8979-wbls8" Nov 28 16:33:33 crc kubenswrapper[4909]: I1128 16:33:33.024047 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:33 crc kubenswrapper[4909]: I1128 16:33:33.094985 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerStarted","Data":"06ec10e870b78a9508fdc0f9af0d0769bace54567bdcb85ffc77bf9a218d7d6e"} Nov 28 16:33:33 crc kubenswrapper[4909]: I1128 16:33:33.127260 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.127242036 podStartE2EDuration="3.127242036s" podCreationTimestamp="2025-11-28 16:33:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:33.122461158 +0000 UTC m=+1395.519145682" watchObservedRunningTime="2025-11-28 16:33:33.127242036 +0000 UTC m=+1395.523926560" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.144318 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dntf4"] Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.146091 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.147572 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-677zc" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.147761 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.148792 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.162011 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dntf4"] Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.197303 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.198129 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8grmp\" (UniqueName: \"kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.198217 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.198569 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.301041 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.301201 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.301290 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8grmp\" (UniqueName: \"kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.301362 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.310621 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.310894 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.312157 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.330109 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8grmp\" (UniqueName: \"kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp\") pod \"nova-cell0-conductor-db-sync-dntf4\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.427792 4909 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.428043 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-central-agent" containerID="cri-o://800423d80fa6fb28efaf88c09b053243c0b2c7dbb4787a6cb1e8c9a281d4aeb2" gracePeriod=30 Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.428285 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="sg-core" containerID="cri-o://81db4557742d314ae925fe733a89a4e9d28890b8b23a13a13e3d329164b79e39" gracePeriod=30 Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.428299 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-notification-agent" containerID="cri-o://2a1f0639baeefb1607f256503f16c79ecbfd0b0c79a2d3012c1d27ae41d1a1e6" gracePeriod=30 Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.428408 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="proxy-httpd" containerID="cri-o://8e824f6bffec178bb7da3230b8cd5e6d04d4b558190b57ed85b3123d329f8ad9" gracePeriod=30 Nov 28 16:33:34 crc kubenswrapper[4909]: I1128 16:33:34.463512 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.000378 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dntf4"] Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.046001 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.115683 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dntf4" event={"ID":"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a","Type":"ContainerStarted","Data":"c60adb894b78cb43fcb87f2c8745fb6b98645df166d13878a2f36960110858ce"} Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.135947 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerID="8e824f6bffec178bb7da3230b8cd5e6d04d4b558190b57ed85b3123d329f8ad9" exitCode=0 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136011 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerID="81db4557742d314ae925fe733a89a4e9d28890b8b23a13a13e3d329164b79e39" exitCode=2 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136024 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerID="2a1f0639baeefb1607f256503f16c79ecbfd0b0c79a2d3012c1d27ae41d1a1e6" exitCode=0 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136036 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerID="800423d80fa6fb28efaf88c09b053243c0b2c7dbb4787a6cb1e8c9a281d4aeb2" exitCode=0 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136067 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerDied","Data":"8e824f6bffec178bb7da3230b8cd5e6d04d4b558190b57ed85b3123d329f8ad9"} Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136114 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerDied","Data":"81db4557742d314ae925fe733a89a4e9d28890b8b23a13a13e3d329164b79e39"} Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136133 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerDied","Data":"2a1f0639baeefb1607f256503f16c79ecbfd0b0c79a2d3012c1d27ae41d1a1e6"} Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.136145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerDied","Data":"800423d80fa6fb28efaf88c09b053243c0b2c7dbb4787a6cb1e8c9a281d4aeb2"} Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.434482 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-55f6d745d5-tgbm7" Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.514815 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.515109 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-674458cfcb-bc4hb" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-api" containerID="cri-o://517e56bc76234fa79f62e03acb5bdc533b095335447d92cbe971f721fddb1d41" gracePeriod=30 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.515657 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-674458cfcb-bc4hb" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-httpd" containerID="cri-o://41a9bd70bd75a09405e44da59f77c9160795724e7ab52b06e2ba83b64804be3d" gracePeriod=30 Nov 28 16:33:35 crc kubenswrapper[4909]: I1128 16:33:35.928175 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.030745 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.030862 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.030898 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.030966 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.031002 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-564lh\" (UniqueName: \"kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.031052 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.031073 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml\") pod \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\" (UID: \"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71\") " Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.031650 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.031720 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.041845 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts" (OuterVolumeSpecName: "scripts") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.042150 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh" (OuterVolumeSpecName: "kube-api-access-564lh") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "kube-api-access-564lh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.067610 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.119876 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.132945 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.132982 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-564lh\" (UniqueName: \"kubernetes.io/projected/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-kube-api-access-564lh\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.132993 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.133003 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.133012 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.133022 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.149100 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.149114 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbcf7a74-7fb6-4c78-8e6a-86539a59eb71","Type":"ContainerDied","Data":"bb0c29e9a90a2fe9de00fd482b5e984c8eab44dca0c70d0a9c4e0af3d1789e3e"} Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.149168 4909 scope.go:117] "RemoveContainer" containerID="8e824f6bffec178bb7da3230b8cd5e6d04d4b558190b57ed85b3123d329f8ad9" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.152087 4909 generic.go:334] "Generic (PLEG): container finished" podID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerID="41a9bd70bd75a09405e44da59f77c9160795724e7ab52b06e2ba83b64804be3d" exitCode=0 Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.152136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerDied","Data":"41a9bd70bd75a09405e44da59f77c9160795724e7ab52b06e2ba83b64804be3d"} Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.158552 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data" (OuterVolumeSpecName: "config-data") pod "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" (UID: "fbcf7a74-7fb6-4c78-8e6a-86539a59eb71"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.188000 4909 scope.go:117] "RemoveContainer" containerID="81db4557742d314ae925fe733a89a4e9d28890b8b23a13a13e3d329164b79e39" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.208890 4909 scope.go:117] "RemoveContainer" containerID="2a1f0639baeefb1607f256503f16c79ecbfd0b0c79a2d3012c1d27ae41d1a1e6" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.234289 4909 scope.go:117] "RemoveContainer" containerID="800423d80fa6fb28efaf88c09b053243c0b2c7dbb4787a6cb1e8c9a281d4aeb2" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.238936 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.489937 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.501004 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.514913 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:36 crc kubenswrapper[4909]: E1128 16:33:36.515367 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-notification-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515390 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-notification-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: E1128 16:33:36.515399 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="proxy-httpd" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515404 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" 
containerName="proxy-httpd" Nov 28 16:33:36 crc kubenswrapper[4909]: E1128 16:33:36.515422 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-central-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515428 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-central-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: E1128 16:33:36.515446 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="sg-core" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515452 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="sg-core" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515645 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="proxy-httpd" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515678 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-notification-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515693 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="sg-core" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.515701 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" containerName="ceilometer-central-agent" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.517381 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.522502 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.522743 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.538866 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644167 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644206 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644228 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f5nj\" (UniqueName: \"kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644406 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644620 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644752 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.644817 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746583 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746651 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f5nj\" (UniqueName: \"kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746792 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746884 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.746934 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 
16:33:36.746966 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.747496 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.747585 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.752319 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.754081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.754806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.755535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.765858 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f5nj\" (UniqueName: \"kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj\") pod \"ceilometer-0\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " pod="openstack/ceilometer-0" Nov 28 16:33:36 crc kubenswrapper[4909]: I1128 16:33:36.838053 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:37 crc kubenswrapper[4909]: I1128 16:33:37.276555 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:37 crc kubenswrapper[4909]: I1128 16:33:37.933403 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbcf7a74-7fb6-4c78-8e6a-86539a59eb71" path="/var/lib/kubelet/pods/fbcf7a74-7fb6-4c78-8e6a-86539a59eb71/volumes" Nov 28 16:33:38 crc kubenswrapper[4909]: I1128 16:33:38.200055 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerStarted","Data":"d8369f4a74461b3441ba2003ba71f3483fdd6004a11ddf23dcef0a5bd1aba6b1"} Nov 28 16:33:38 crc kubenswrapper[4909]: I1128 16:33:38.619693 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:33:38 crc kubenswrapper[4909]: I1128 16:33:38.619737 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:33:38 crc kubenswrapper[4909]: I1128 16:33:38.672842 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:33:38 crc kubenswrapper[4909]: I1128 16:33:38.677767 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:33:39 crc kubenswrapper[4909]: I1128 16:33:39.207704 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:33:39 crc kubenswrapper[4909]: I1128 16:33:39.208028 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:33:40 crc kubenswrapper[4909]: I1128 16:33:40.405443 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:40 crc kubenswrapper[4909]: I1128 16:33:40.406749 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:40 crc kubenswrapper[4909]: I1128 16:33:40.455880 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:40 crc kubenswrapper[4909]: I1128 16:33:40.461104 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.226500 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.226525 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.227097 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.227134 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.327144 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 16:33:41 crc kubenswrapper[4909]: I1128 16:33:41.329436 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-external-api-0" Nov 28 16:33:42 crc kubenswrapper[4909]: I1128 16:33:42.242411 4909 generic.go:334] "Generic (PLEG): container finished" podID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerID="517e56bc76234fa79f62e03acb5bdc533b095335447d92cbe971f721fddb1d41" exitCode=0 Nov 28 16:33:42 crc kubenswrapper[4909]: I1128 16:33:42.242488 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerDied","Data":"517e56bc76234fa79f62e03acb5bdc533b095335447d92cbe971f721fddb1d41"} Nov 28 16:33:43 crc kubenswrapper[4909]: I1128 16:33:43.476508 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:43 crc kubenswrapper[4909]: I1128 16:33:43.476895 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:43 crc kubenswrapper[4909]: I1128 16:33:43.555521 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.042125 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.145678 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs\") pod \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.146161 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlgsf\" (UniqueName: \"kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf\") pod \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.146222 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config\") pod \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.146263 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config\") pod \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.146344 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle\") pod \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\" (UID: \"f3c35b84-87e7-42d1-bbd6-5f72f38ab989\") " Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.158831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "f3c35b84-87e7-42d1-bbd6-5f72f38ab989" (UID: "f3c35b84-87e7-42d1-bbd6-5f72f38ab989"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.169504 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf" (OuterVolumeSpecName: "kube-api-access-mlgsf") pod "f3c35b84-87e7-42d1-bbd6-5f72f38ab989" (UID: "f3c35b84-87e7-42d1-bbd6-5f72f38ab989"). InnerVolumeSpecName "kube-api-access-mlgsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.238915 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config" (OuterVolumeSpecName: "config") pod "f3c35b84-87e7-42d1-bbd6-5f72f38ab989" (UID: "f3c35b84-87e7-42d1-bbd6-5f72f38ab989"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.243229 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3c35b84-87e7-42d1-bbd6-5f72f38ab989" (UID: "f3c35b84-87e7-42d1-bbd6-5f72f38ab989"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.248827 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.248869 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.248879 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlgsf\" (UniqueName: \"kubernetes.io/projected/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-kube-api-access-mlgsf\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.248888 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.266280 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "f3c35b84-87e7-42d1-bbd6-5f72f38ab989" (UID: "f3c35b84-87e7-42d1-bbd6-5f72f38ab989"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.305855 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerStarted","Data":"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75"} Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.308772 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-674458cfcb-bc4hb" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.308815 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-674458cfcb-bc4hb" event={"ID":"f3c35b84-87e7-42d1-bbd6-5f72f38ab989","Type":"ContainerDied","Data":"90b7681731b09ce06f00b98cfefec0287043357e8ff8ccffd96d79c81aee87ce"} Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.308888 4909 scope.go:117] "RemoveContainer" containerID="41a9bd70bd75a09405e44da59f77c9160795724e7ab52b06e2ba83b64804be3d" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.310984 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dntf4" event={"ID":"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a","Type":"ContainerStarted","Data":"b06f7ceb8721d82fd465811ed29d9a70387b3cc0eee40cd66aaef773726c983a"} Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.335822 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-dntf4" podStartSLOduration=1.443220234 podStartE2EDuration="12.335799773s" podCreationTimestamp="2025-11-28 16:33:34 +0000 UTC" firstStartedPulling="2025-11-28 16:33:35.045723132 +0000 UTC m=+1397.442407656" lastFinishedPulling="2025-11-28 16:33:45.938302671 +0000 UTC m=+1408.334987195" observedRunningTime="2025-11-28 16:33:46.3330689 +0000 UTC m=+1408.729753434" watchObservedRunningTime="2025-11-28 16:33:46.335799773 +0000 UTC m=+1408.732484297" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.350686 4909 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3c35b84-87e7-42d1-bbd6-5f72f38ab989-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.375652 4909 scope.go:117] "RemoveContainer" containerID="517e56bc76234fa79f62e03acb5bdc533b095335447d92cbe971f721fddb1d41" Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.387987 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:46 crc kubenswrapper[4909]: I1128 16:33:46.396458 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-674458cfcb-bc4hb"] Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.254281 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.321529 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerStarted","Data":"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2"} Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.745462 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"] Nov 28 16:33:47 crc kubenswrapper[4909]: E1128 16:33:47.746384 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-httpd" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.746455 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-httpd" Nov 28 16:33:47 crc kubenswrapper[4909]: E1128 16:33:47.746544 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-api" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.746598 4909 
state_mem.go:107] "Deleted CPUSet assignment" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-api" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.746838 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-httpd" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.746917 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" containerName="neutron-api" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.748247 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.755383 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"] Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.876783 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.876833 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.877071 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px7s2\" (UniqueName: \"kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.933152 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3c35b84-87e7-42d1-bbd6-5f72f38ab989" path="/var/lib/kubelet/pods/f3c35b84-87e7-42d1-bbd6-5f72f38ab989/volumes" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.980975 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.981024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.981259 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px7s2\" (UniqueName: \"kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:33:47 crc 
Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.981797 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:47 crc kubenswrapper[4909]: I1128 16:33:47.982116 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:48 crc kubenswrapper[4909]: I1128 16:33:48.034188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px7s2\" (UniqueName: \"kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2\") pod \"redhat-operators-v7qgf\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:48 crc kubenswrapper[4909]: I1128 16:33:48.063149 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:48 crc kubenswrapper[4909]: I1128 16:33:48.332295 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerStarted","Data":"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8"}
Nov 28 16:33:48 crc kubenswrapper[4909]: I1128 16:33:48.983487 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"]
Nov 28 16:33:49 crc kubenswrapper[4909]: I1128 16:33:49.348269 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerStarted","Data":"7105da525d5b2a502b3e733e64127b4a1b133bcdfe325299f433d4e517d95579"}
Nov 28 16:33:49 crc kubenswrapper[4909]: I1128 16:33:49.911811 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:33:49 crc kubenswrapper[4909]: I1128 16:33:49.912180 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:33:50 crc kubenswrapper[4909]: I1128 16:33:50.359767 4909 generic.go:334] "Generic (PLEG): container finished" podID="904cc494-851b-40cd-9c15-1beecd95ec35" containerID="41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2" exitCode=0
Nov 28 16:33:50 crc kubenswrapper[4909]: I1128 16:33:50.359820 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerDied","Data":"41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2"}
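Every entry in this journal shares the same shape: a journald prefix, a klog header (severity letter plus MMDD, time, PID, file:line), a quoted message, then key=value pairs. A rough parser sketch for that shape (simplified: it ignores escaped quotes and structured `event={...}` values):

```python
import re

# One entry copied from the journal above.
line = ('Nov 28 16:33:49 crc kubenswrapper[4909]: I1128 16:33:49.912180 4909 '
        'prober.go:107] "Probe failed" probeType="Liveness" '
        'pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"')

# journald prefix, then klog: <sev><MMDD> <time> <pid> <file.go:line>] "msg" k=v...
PAT = re.compile(
    r'^(?P<ts>\w+ \d+ [\d:]+) (?P<host>\S+) (?P<unit>[\w-]+)\[(?P<upid>\d+)\]: '
    r'(?P<sev>[IWEF])(?P<date>\d{4}) (?P<time>[\d.:]+)\s+(?P<pid>\d+) '
    r'(?P<src>[\w_]+\.go:\d+)\] "(?P<msg>[^"]*)"(?P<kvs>.*)$'
)

m = PAT.match(line)
print(m.group("sev"), m.group("src"), m.group("msg"))      # I prober.go:107 Probe failed
kvs = dict(re.findall(r'(\w+)="([^"]*)"', m.group("kvs"))) # naive k="v" extraction
print(kvs["probeType"], kvs["pod"])
```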
pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerStarted","Data":"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f"} Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerStarted","Data":"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278"} Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387478 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-central-agent" containerID="cri-o://eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75" gracePeriod=30 Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387753 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387799 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="proxy-httpd" containerID="cri-o://982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278" gracePeriod=30 Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387837 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="sg-core" containerID="cri-o://b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8" gracePeriod=30 Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.387868 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-notification-agent" containerID="cri-o://2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2" gracePeriod=30 Nov 28 16:33:52 crc kubenswrapper[4909]: I1128 16:33:52.441309 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.055345723 podStartE2EDuration="16.441289491s" podCreationTimestamp="2025-11-28 16:33:36 +0000 UTC" firstStartedPulling="2025-11-28 16:33:37.292305647 +0000 UTC m=+1399.688990171" lastFinishedPulling="2025-11-28 16:33:51.678249415 +0000 UTC m=+1414.074933939" observedRunningTime="2025-11-28 16:33:52.435496305 +0000 UTC m=+1414.832180829" watchObservedRunningTime="2025-11-28 16:33:52.441289491 +0000 UTC m=+1414.837974035" Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.398053 4909 generic.go:334] "Generic (PLEG): container finished" podID="904cc494-851b-40cd-9c15-1beecd95ec35" containerID="ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f" exitCode=0 Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.398134 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerDied","Data":"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f"} Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.402753 4909 generic.go:334] "Generic (PLEG): container finished" podID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerID="982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278" exitCode=0 Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 
Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.402781 4909 generic.go:334] "Generic (PLEG): container finished" podID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerID="b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8" exitCode=2
Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.402799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerDied","Data":"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278"}
Nov 28 16:33:53 crc kubenswrapper[4909]: I1128 16:33:53.402823 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerDied","Data":"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8"}
Nov 28 16:33:54 crc kubenswrapper[4909]: I1128 16:33:54.418501 4909 generic.go:334] "Generic (PLEG): container finished" podID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerID="2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2" exitCode=0
Nov 28 16:33:54 crc kubenswrapper[4909]: I1128 16:33:54.418559 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerDied","Data":"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2"}
Nov 28 16:33:55 crc kubenswrapper[4909]: I1128 16:33:55.431971 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerStarted","Data":"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48"}
Nov 28 16:33:55 crc kubenswrapper[4909]: I1128 16:33:55.450878 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v7qgf" podStartSLOduration=4.469367787 podStartE2EDuration="8.450860211s" podCreationTimestamp="2025-11-28 16:33:47 +0000 UTC" firstStartedPulling="2025-11-28 16:33:50.362632374 +0000 UTC m=+1412.759316898" lastFinishedPulling="2025-11-28 16:33:54.344124758 +0000 UTC m=+1416.740809322" observedRunningTime="2025-11-28 16:33:55.449470234 +0000 UTC m=+1417.846154768" watchObservedRunningTime="2025-11-28 16:33:55.450860211 +0000 UTC m=+1417.847544735"
Nov 28 16:33:58 crc kubenswrapper[4909]: I1128 16:33:58.064018 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:58 crc kubenswrapper[4909]: I1128 16:33:58.064414 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v7qgf"
Nov 28 16:33:59 crc kubenswrapper[4909]: I1128 16:33:59.109949 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v7qgf" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="registry-server" probeResult="failure" output=<
Nov 28 16:33:59 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s
Nov 28 16:33:59 crc kubenswrapper[4909]: >
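The startup-probe failure above is a connect check against :50051 with a 1s budget. A rough stand-in using a bare TCP connect (the actual probe for registry-server is a gRPC health check, so this only approximates its connect phase; port and timeout are copied from the log):

```python
import socket

def startup_probe(host: str = "127.0.0.1", port: int = 50051,
                  timeout: float = 1.0) -> bool:
    """Rough stand-in: can we open a TCP connection within `timeout` seconds?"""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:                 # refused, unreachable, or timed out
        return False

print("started" if startup_probe()
      else 'failure: timeout: failed to connect service ":50051" within 1s')
```

Once the catalog image finishes unpacking, the same check succeeds, which is why the probe flips to status="started" at 16:34:08 below.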
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.141914 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142058 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142156 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142204 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142249 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f5nj\" (UniqueName: \"kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142274 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.142306 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml\") pod \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\" (UID: \"b05b53b8-63e6-45e2-bff1-48ff19c49c76\") " Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.149978 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.151994 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.157831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj" (OuterVolumeSpecName: "kube-api-access-4f5nj") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "kube-api-access-4f5nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.162826 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts" (OuterVolumeSpecName: "scripts") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.220825 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.245088 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.245136 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.245148 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f5nj\" (UniqueName: \"kubernetes.io/projected/b05b53b8-63e6-45e2-bff1-48ff19c49c76-kube-api-access-4f5nj\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.245160 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.245174 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b05b53b8-63e6-45e2-bff1-48ff19c49c76-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.338787 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.347170 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.362885 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data" (OuterVolumeSpecName: "config-data") pod "b05b53b8-63e6-45e2-bff1-48ff19c49c76" (UID: "b05b53b8-63e6-45e2-bff1-48ff19c49c76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.449205 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05b53b8-63e6-45e2-bff1-48ff19c49c76-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.499472 4909 generic.go:334] "Generic (PLEG): container finished" podID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerID="eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.499549 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.499571 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerDied","Data":"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75"} Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.499903 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b05b53b8-63e6-45e2-bff1-48ff19c49c76","Type":"ContainerDied","Data":"d8369f4a74461b3441ba2003ba71f3483fdd6004a11ddf23dcef0a5bd1aba6b1"} Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.499928 4909 scope.go:117] "RemoveContainer" containerID="982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.524264 4909 scope.go:117] "RemoveContainer" containerID="b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.539890 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.546952 4909 scope.go:117] "RemoveContainer" containerID="2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.552588 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.561811 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.562359 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-notification-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.562447 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-notification-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.562509 4909 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-central-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.562563 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-central-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.562634 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="proxy-httpd" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.562719 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="proxy-httpd" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.563166 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="sg-core" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.563237 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="sg-core" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.563480 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="proxy-httpd" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.563544 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-notification-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.563607 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="ceilometer-central-agent" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.563691 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" containerName="sg-core" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.565700 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.568209 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.568840 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.574606 4909 scope.go:117] "RemoveContainer" containerID="eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.580270 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.638217 4909 scope.go:117] "RemoveContainer" containerID="982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.640895 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278\": container with ID starting with 982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278 not found: ID does not exist" containerID="982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.640945 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278"} err="failed to get container status \"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278\": rpc error: code = NotFound desc = could not find container \"982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278\": container with ID starting with 982e87f51cda2c31d683962a7a51ddee53c307917bcb1f9dd0d25f418f16e278 not found: ID does not exist" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.640976 4909 scope.go:117] "RemoveContainer" containerID="b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.643765 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8\": container with ID starting with b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8 not found: ID does not exist" containerID="b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.643871 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8"} err="failed to get container status \"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8\": rpc error: code = NotFound desc = could not find container \"b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8\": container with ID starting with b58ce16914e357736110052249afde41ec72ccc14a02c4f11b717af75a2412c8 not found: ID does not exist" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.643976 4909 scope.go:117] "RemoveContainer" containerID="2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.644426 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2\": container with ID starting with 2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2 not found: ID does not exist" containerID="2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.644515 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2"} err="failed to get container status \"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2\": rpc error: code = NotFound desc = could not find container \"2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2\": container with ID starting with 2d8778a731b69e96d101ec03705a43c56ceeb2509fcdd2bc37509c3151a9d1d2 not found: ID does not exist" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.644595 4909 scope.go:117] "RemoveContainer" containerID="eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75" Nov 28 16:34:01 crc kubenswrapper[4909]: E1128 16:34:01.644973 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75\": container with ID starting with eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75 not found: ID does not exist" containerID="eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.645019 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75"} err="failed to get container status \"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75\": rpc error: code = NotFound desc = could not find container \"eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75\": container with ID starting with eb988c2bffc55db0ee605010794988fd302f7fd755a1c2b5859db81da90c3e75 not found: ID does not exist" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.651784 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.651837 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs2s8\" (UniqueName: \"kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.651893 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.651974 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.652071 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.652099 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.652131 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754415 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754485 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754511 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754544 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754604 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754626 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs2s8\" (UniqueName: \"kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754690 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.754908 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.755442 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.759011 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.759267 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.759801 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.769251 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.773033 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs2s8\" (UniqueName: \"kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8\") pod \"ceilometer-0\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " pod="openstack/ceilometer-0" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.915493 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05b53b8-63e6-45e2-bff1-48ff19c49c76" path="/var/lib/kubelet/pods/b05b53b8-63e6-45e2-bff1-48ff19c49c76/volumes" Nov 28 16:34:01 crc kubenswrapper[4909]: I1128 16:34:01.928074 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:02 crc kubenswrapper[4909]: I1128 16:34:02.388358 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:02 crc kubenswrapper[4909]: I1128 16:34:02.513089 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerStarted","Data":"f5b6f2f4ebb3ee5b372eb465b6414e2da842e6021d927b8e90887a48ac40f28d"} Nov 28 16:34:03 crc kubenswrapper[4909]: I1128 16:34:03.070820 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:05 crc kubenswrapper[4909]: I1128 16:34:05.555607 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerStarted","Data":"e6e645fabdf72540bad71659d62cc61d550d5131f071ac02e662ebfb69991f04"} Nov 28 16:34:06 crc kubenswrapper[4909]: I1128 16:34:06.567832 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerStarted","Data":"71ea8b09eb6024bffef6a00074f38edb5df62a42149d93f74d218f410d00292c"} Nov 28 16:34:07 crc kubenswrapper[4909]: I1128 16:34:07.577969 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerStarted","Data":"25149522bc5abac18b8b5b177459e0d1b3747e94a6872d2b3d9ef9b026c8cf52"} Nov 28 16:34:08 crc kubenswrapper[4909]: I1128 16:34:08.114955 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:34:08 crc kubenswrapper[4909]: I1128 16:34:08.169537 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:34:08 crc kubenswrapper[4909]: I1128 16:34:08.365230 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"] Nov 28 16:34:08 crc kubenswrapper[4909]: I1128 16:34:08.592237 4909 generic.go:334] "Generic (PLEG): container finished" podID="e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" containerID="b06f7ceb8721d82fd465811ed29d9a70387b3cc0eee40cd66aaef773726c983a" exitCode=0 Nov 28 16:34:08 crc kubenswrapper[4909]: I1128 16:34:08.593834 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dntf4" event={"ID":"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a","Type":"ContainerDied","Data":"b06f7ceb8721d82fd465811ed29d9a70387b3cc0eee40cd66aaef773726c983a"} Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.609180 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v7qgf" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="registry-server" containerID="cri-o://356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48" gracePeriod=2 Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.609537 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-central-agent" containerID="cri-o://e6e645fabdf72540bad71659d62cc61d550d5131f071ac02e662ebfb69991f04" gracePeriod=30 Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.609590 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerStarted","Data":"3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb"} Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.610031 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.610078 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="proxy-httpd" containerID="cri-o://3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb" gracePeriod=30 Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.610138 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="sg-core" containerID="cri-o://25149522bc5abac18b8b5b177459e0d1b3747e94a6872d2b3d9ef9b026c8cf52" gracePeriod=30 Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.610189 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-notification-agent" containerID="cri-o://71ea8b09eb6024bffef6a00074f38edb5df62a42149d93f74d218f410d00292c" gracePeriod=30 Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.649367 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.663080078 podStartE2EDuration="8.649348785s" podCreationTimestamp="2025-11-28 16:34:01 +0000 UTC" firstStartedPulling="2025-11-28 16:34:02.399219447 +0000 UTC m=+1424.795903971" lastFinishedPulling="2025-11-28 16:34:08.385488164 +0000 UTC m=+1430.782172678" observedRunningTime="2025-11-28 16:34:09.63313497 +0000 UTC m=+1432.029819504" watchObservedRunningTime="2025-11-28 16:34:09.649348785 +0000 UTC m=+1432.046033309" Nov 28 16:34:09 crc kubenswrapper[4909]: I1128 16:34:09.983172 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:34:09 crc kubenswrapper[4909]: E1128 16:34:09.987307 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55b36ba0_f0a1_4de4_a4e4_0896cc7291b4.slice/crio-3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55b36ba0_f0a1_4de4_a4e4_0896cc7291b4.slice/crio-conmon-3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.070833 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.127419 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts\") pod \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.127480 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data\") pod \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.127538 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle\") pod \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.127624 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8grmp\" (UniqueName: \"kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp\") pod \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\" (UID: \"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.140371 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts" (OuterVolumeSpecName: "scripts") pod "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" (UID: "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.140380 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp" (OuterVolumeSpecName: "kube-api-access-8grmp") pod "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" (UID: "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a"). InnerVolumeSpecName "kube-api-access-8grmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.158609 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" (UID: "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.161860 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data" (OuterVolumeSpecName: "config-data") pod "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" (UID: "e042c4d0-9fc9-4e8d-a67f-6e3029444f4a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.229131 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content\") pod \"904cc494-851b-40cd-9c15-1beecd95ec35\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.229286 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities\") pod \"904cc494-851b-40cd-9c15-1beecd95ec35\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.229332 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px7s2\" (UniqueName: \"kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2\") pod \"904cc494-851b-40cd-9c15-1beecd95ec35\" (UID: \"904cc494-851b-40cd-9c15-1beecd95ec35\") " Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.229978 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8grmp\" (UniqueName: \"kubernetes.io/projected/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-kube-api-access-8grmp\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.229999 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.230010 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.230031 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.230493 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities" (OuterVolumeSpecName: "utilities") pod "904cc494-851b-40cd-9c15-1beecd95ec35" (UID: "904cc494-851b-40cd-9c15-1beecd95ec35"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.233296 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2" (OuterVolumeSpecName: "kube-api-access-px7s2") pod "904cc494-851b-40cd-9c15-1beecd95ec35" (UID: "904cc494-851b-40cd-9c15-1beecd95ec35"). InnerVolumeSpecName "kube-api-access-px7s2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.332171 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.332216 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px7s2\" (UniqueName: \"kubernetes.io/projected/904cc494-851b-40cd-9c15-1beecd95ec35-kube-api-access-px7s2\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.344476 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "904cc494-851b-40cd-9c15-1beecd95ec35" (UID: "904cc494-851b-40cd-9c15-1beecd95ec35"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.434540 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904cc494-851b-40cd-9c15-1beecd95ec35-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618462 4909 generic.go:334] "Generic (PLEG): container finished" podID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerID="3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb" exitCode=0 Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618495 4909 generic.go:334] "Generic (PLEG): container finished" podID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerID="25149522bc5abac18b8b5b177459e0d1b3747e94a6872d2b3d9ef9b026c8cf52" exitCode=2 Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618505 4909 generic.go:334] "Generic (PLEG): container finished" podID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerID="71ea8b09eb6024bffef6a00074f38edb5df62a42149d93f74d218f410d00292c" exitCode=0 Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618562 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerDied","Data":"3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerDied","Data":"25149522bc5abac18b8b5b177459e0d1b3747e94a6872d2b3d9ef9b026c8cf52"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.618693 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerDied","Data":"71ea8b09eb6024bffef6a00074f38edb5df62a42149d93f74d218f410d00292c"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.619996 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dntf4" event={"ID":"e042c4d0-9fc9-4e8d-a67f-6e3029444f4a","Type":"ContainerDied","Data":"c60adb894b78cb43fcb87f2c8745fb6b98645df166d13878a2f36960110858ce"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.620025 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c60adb894b78cb43fcb87f2c8745fb6b98645df166d13878a2f36960110858ce" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 
16:34:10.620052 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dntf4" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.621947 4909 generic.go:334] "Generic (PLEG): container finished" podID="904cc494-851b-40cd-9c15-1beecd95ec35" containerID="356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48" exitCode=0 Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.621972 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerDied","Data":"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.621987 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7qgf" event={"ID":"904cc494-851b-40cd-9c15-1beecd95ec35","Type":"ContainerDied","Data":"7105da525d5b2a502b3e733e64127b4a1b133bcdfe325299f433d4e517d95579"} Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.622000 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7qgf" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.622004 4909 scope.go:117] "RemoveContainer" containerID="356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.657498 4909 scope.go:117] "RemoveContainer" containerID="ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.700178 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"] Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.709714 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v7qgf"] Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.715604 4909 scope.go:117] "RemoveContainer" containerID="41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.740215 4909 scope.go:117] "RemoveContainer" containerID="356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48" Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.743611 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48\": container with ID starting with 356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48 not found: ID does not exist" containerID="356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.743678 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48"} err="failed to get container status \"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48\": rpc error: code = NotFound desc = could not find container \"356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48\": container with ID starting with 356ffcd8f6df478f2d4c31a6d698374ccc314a0a8b62b064bbca2b32b178af48 not found: ID does not exist" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.743712 4909 scope.go:117] "RemoveContainer" containerID="ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f" Nov 28 16:34:10 crc 
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.744296 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f\": container with ID starting with ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f not found: ID does not exist" containerID="ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.744318 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f"} err="failed to get container status \"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f\": rpc error: code = NotFound desc = could not find container \"ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f\": container with ID starting with ac8683e8eb4eaced3485daf456285279ce6f473479fc553a1fcdebee2cf78e1f not found: ID does not exist"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.744333 4909 scope.go:117] "RemoveContainer" containerID="41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2"
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.744977 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2\": container with ID starting with 41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2 not found: ID does not exist" containerID="41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.745031 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2"} err="failed to get container status \"41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2\": rpc error: code = NotFound desc = could not find container \"41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2\": container with ID starting with 41476722c092b4262ae79202ce146eb2fcdd76085c0defd8169972ed047bcca2 not found: ID does not exist"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.754892 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.755296 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="extract-content"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755307 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="extract-content"
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.755325 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="registry-server"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755331 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="registry-server"
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.755346 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" containerName="nova-cell0-conductor-db-sync"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755351 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" containerName="nova-cell0-conductor-db-sync"
Nov 28 16:34:10 crc kubenswrapper[4909]: E1128 16:34:10.755376 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="extract-utilities"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755382 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="extract-utilities"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755550 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" containerName="nova-cell0-conductor-db-sync"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.755560 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" containerName="registry-server"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.757940 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.761496 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-677zc"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.761615 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.789992 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.840075 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.840137 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.840196 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzsgm\" (UniqueName: \"kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.941836 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.941888 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.941923 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzsgm\" (UniqueName: \"kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.947081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.947247 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:10 crc kubenswrapper[4909]: I1128 16:34:10.958865 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzsgm\" (UniqueName: \"kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm\") pod \"nova-cell0-conductor-0\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.121240 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.374543 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.376466 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.385337 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.451271 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgqf8\" (UniqueName: \"kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.451401 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.451479 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.528953 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.552503 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgqf8\" (UniqueName: \"kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.552552 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.552603 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.552999 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.553081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content\") pod \"certified-operators-lc22m\" (UID: 
\"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.571145 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgqf8\" (UniqueName: \"kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8\") pod \"certified-operators-lc22m\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.638319 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c64b6821-6f46-4764-be55-97ed8c71fefa","Type":"ContainerStarted","Data":"f1a87ce8374db877ff4a4b3a66ae2f5efd1d08bd22aafe79ee5158a6aef305d0"} Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.697860 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:11 crc kubenswrapper[4909]: I1128 16:34:11.916766 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="904cc494-851b-40cd-9c15-1beecd95ec35" path="/var/lib/kubelet/pods/904cc494-851b-40cd-9c15-1beecd95ec35/volumes" Nov 28 16:34:12 crc kubenswrapper[4909]: W1128 16:34:12.026731 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b6c3766_5116_4179_aae7_17f9564115c0.slice/crio-f4a5af0dae7a54b8be7d541d46d9ca4b7fd667001eddee9f2a1505f11f89f7a8 WatchSource:0}: Error finding container f4a5af0dae7a54b8be7d541d46d9ca4b7fd667001eddee9f2a1505f11f89f7a8: Status 404 returned error can't find the container with id f4a5af0dae7a54b8be7d541d46d9ca4b7fd667001eddee9f2a1505f11f89f7a8 Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.026889 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.650494 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c64b6821-6f46-4764-be55-97ed8c71fefa","Type":"ContainerStarted","Data":"1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740"} Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.650569 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.652400 4909 generic.go:334] "Generic (PLEG): container finished" podID="9b6c3766-5116-4179-aae7-17f9564115c0" containerID="7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3" exitCode=0 Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.652439 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerDied","Data":"7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3"} Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.652458 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerStarted","Data":"f4a5af0dae7a54b8be7d541d46d9ca4b7fd667001eddee9f2a1505f11f89f7a8"} Nov 28 16:34:12 crc kubenswrapper[4909]: I1128 16:34:12.669358 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.669329885 
podStartE2EDuration="2.669329885s" podCreationTimestamp="2025-11-28 16:34:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:12.666005456 +0000 UTC m=+1435.062689980" watchObservedRunningTime="2025-11-28 16:34:12.669329885 +0000 UTC m=+1435.066014419" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.670226 4909 generic.go:334] "Generic (PLEG): container finished" podID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerID="e6e645fabdf72540bad71659d62cc61d550d5131f071ac02e662ebfb69991f04" exitCode=0 Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.671569 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerDied","Data":"e6e645fabdf72540bad71659d62cc61d550d5131f071ac02e662ebfb69991f04"} Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.671603 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4","Type":"ContainerDied","Data":"f5b6f2f4ebb3ee5b372eb465b6414e2da842e6021d927b8e90887a48ac40f28d"} Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.671768 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5b6f2f4ebb3ee5b372eb465b6414e2da842e6021d927b8e90887a48ac40f28d" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.726248 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804777 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804842 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804872 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs2s8\" (UniqueName: \"kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804901 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804932 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.804972 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd\") pod \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\" (UID: \"55b36ba0-f0a1-4de4-a4e4-0896cc7291b4\") " Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.805218 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.805341 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.806150 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.810753 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts" (OuterVolumeSpecName: "scripts") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.810909 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8" (OuterVolumeSpecName: "kube-api-access-vs2s8") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "kube-api-access-vs2s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.841793 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.877831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.907750 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.907781 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs2s8\" (UniqueName: \"kubernetes.io/projected/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-kube-api-access-vs2s8\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.907790 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.907798 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.907807 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:13 crc kubenswrapper[4909]: I1128 16:34:13.912062 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data" (OuterVolumeSpecName: "config-data") pod "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" (UID: "55b36ba0-f0a1-4de4-a4e4-0896cc7291b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.009547 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.680625 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.721156 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.734786 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.755888 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:14 crc kubenswrapper[4909]: E1128 16:34:14.756527 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="sg-core" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.756623 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="sg-core" Nov 28 16:34:14 crc kubenswrapper[4909]: E1128 16:34:14.756725 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="proxy-httpd" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.756819 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="proxy-httpd" Nov 28 16:34:14 crc kubenswrapper[4909]: E1128 16:34:14.756909 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-central-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.756989 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-central-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: E1128 16:34:14.757093 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-notification-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.757182 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-notification-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.757406 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="proxy-httpd" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.757475 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="sg-core" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.757574 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-notification-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.757640 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" containerName="ceilometer-central-agent" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.759883 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.765099 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.765267 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.785318 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.925367 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxr2k\" (UniqueName: \"kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.925628 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.925722 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.925915 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.926015 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.926141 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:14 crc kubenswrapper[4909]: I1128 16:34:14.926256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.027799 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.028877 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.029864 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.029989 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.030160 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.028758 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.030272 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxr2k\" (UniqueName: \"kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.030398 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.030418 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.033980 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.034392 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.046357 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.052805 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.064525 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxr2k\" (UniqueName: \"kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k\") pod \"ceilometer-0\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.080821 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.525762 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.695141 4909 generic.go:334] "Generic (PLEG): container finished" podID="9b6c3766-5116-4179-aae7-17f9564115c0" containerID="67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf" exitCode=0 Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.695197 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerDied","Data":"67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf"} Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.701289 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerStarted","Data":"efbd0e447d3ba60537b057f0fcecf38cfff86dd3cddc453609da0881f3a4b5b5"} Nov 28 16:34:15 crc kubenswrapper[4909]: I1128 16:34:15.912370 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55b36ba0-f0a1-4de4-a4e4-0896cc7291b4" path="/var/lib/kubelet/pods/55b36ba0-f0a1-4de4-a4e4-0896cc7291b4/volumes" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.155722 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.702899 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-fn62x"] Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.704379 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.706124 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.707876 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.711095 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fn62x"] Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.713453 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerStarted","Data":"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc"} Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.718318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerStarted","Data":"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335"} Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.761196 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lc22m" podStartSLOduration=2.2613860089999998 podStartE2EDuration="5.76117711s" podCreationTimestamp="2025-11-28 16:34:11 +0000 UTC" firstStartedPulling="2025-11-28 16:34:12.655089753 +0000 UTC m=+1435.051774277" lastFinishedPulling="2025-11-28 16:34:16.154880854 +0000 UTC m=+1438.551565378" observedRunningTime="2025-11-28 16:34:16.760309437 +0000 UTC m=+1439.156993961" watchObservedRunningTime="2025-11-28 16:34:16.76117711 +0000 UTC m=+1439.157861624" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.763932 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.764090 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.764123 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.764151 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmwmx\" (UniqueName: \"kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.865247 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmwmx\" (UniqueName: \"kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.865333 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.865341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.865938 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.866022 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.867275 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.872282 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.872703 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.876834 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.883185 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.894404 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:16 crc kubenswrapper[4909]: I1128 16:34:16.908853 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmwmx\" (UniqueName: \"kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx\") pod \"nova-cell0-cell-mapping-fn62x\" (UID: 
\"b69a490a-8b2d-405f-b41e-903268d44cca\") " pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.008098 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.023189 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.048259 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.048368 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.056839 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.065911 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.068587 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.073188 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.122892 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.122972 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcwj9\" (UniqueName: \"kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.123007 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.123028 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.140373 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.155882 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.174871 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.181287 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.191127 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.192716 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.226745 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230356 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230417 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6mv7\" (UniqueName: \"kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230517 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230557 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230604 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcwj9\" (UniqueName: \"kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230692 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230725 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230753 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgzrr\" (UniqueName: \"kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230805 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.230989 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.261426 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.266791 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.272153 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.292365 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcwj9\" (UniqueName: \"kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9\") pod \"nova-api-0\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.301937 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.335933 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkxmb\" (UniqueName: \"kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336017 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336100 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336132 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336191 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336239 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336286 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336311 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56mff\" (UniqueName: \"kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336375 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgzrr\" (UniqueName: \"kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc 
kubenswrapper[4909]: I1128 16:34:17.336435 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336467 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336494 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336530 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336555 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336698 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.336723 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6mv7\" (UniqueName: \"kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.337517 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.341860 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.347295 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.350205 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.354980 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.357322 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6mv7\" (UniqueName: \"kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7\") pod \"nova-metadata-0\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.359164 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgzrr\" (UniqueName: \"kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr\") pod \"nova-cell1-novncproxy-0\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438085 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkxmb\" (UniqueName: \"kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438170 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438204 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438228 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438251 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56mff\" (UniqueName: \"kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " 
pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438296 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438317 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438339 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.438369 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.439305 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.440232 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.440432 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.440924 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.441608 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.444357 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.445015 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.466684 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkxmb\" (UniqueName: \"kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb\") pod \"nova-scheduler-0\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.474878 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56mff\" (UniqueName: \"kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff\") pod \"dnsmasq-dns-865f5d856f-rmkzw\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.535086 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.568336 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.606125 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.644078 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.724511 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fn62x"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.780440 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:17 crc kubenswrapper[4909]: I1128 16:34:17.785355 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerStarted","Data":"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.244895 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.593603 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.694430 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f75mj"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.705162 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.712459 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.713080 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.744054 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f75mj"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.771801 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.783585 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.798107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerStarted","Data":"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.809342 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerStarted","Data":"017d003f78858088f93bea3a24c4bc9aca35af382cc8d43cecbcc3eb1a7f06f0"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.811523 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b94e81c7-38f5-47c0-b6f7-7624101ef17b","Type":"ContainerStarted","Data":"c496250ab43f390a4e55378a43b247acee7b9004fcb35f4346bff5d92d079ceb"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.813574 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2d385d4c-d982-4d91-817f-f21d879b187d","Type":"ContainerStarted","Data":"ef9a871bc72782d62bb29d6250306b234e704f120c173470eeb4cfd7bd32ba73"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.815435 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fn62x" event={"ID":"b69a490a-8b2d-405f-b41e-903268d44cca","Type":"ContainerStarted","Data":"ce2426fd0deaef85cce4c6ed203e672ca1bccd2feef4b79a7cc06bd0d0e935c9"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.815472 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fn62x" event={"ID":"b69a490a-8b2d-405f-b41e-903268d44cca","Type":"ContainerStarted","Data":"bd42249a773de9330a491b8acd0cfac8b5fd43e4c1a56e4840bd7c7d401a5744"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.817254 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" event={"ID":"1c23d3ad-6031-41a7-89cd-f9b863351cc1","Type":"ContainerStarted","Data":"de9c72cc855c99206312d060643947b52b77e5ea882c520360218724f6568a84"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.820634 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerStarted","Data":"47be0d04a0f1a1109b47c39249d6603d370c54687f262f537cc2cc5b8866f311"} Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.838714 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-fn62x" 
podStartSLOduration=2.838695177 podStartE2EDuration="2.838695177s" podCreationTimestamp="2025-11-28 16:34:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:18.831997107 +0000 UTC m=+1441.228681631" watchObservedRunningTime="2025-11-28 16:34:18.838695177 +0000 UTC m=+1441.235379691" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.897620 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5np4r\" (UniqueName: \"kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.898178 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.898257 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:18 crc kubenswrapper[4909]: I1128 16:34:18.898302 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.000610 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5np4r\" (UniqueName: \"kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.000711 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.000732 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.000763 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle\") pod 
\"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.005013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.007243 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.021244 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5np4r\" (UniqueName: \"kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.021829 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data\") pod \"nova-cell1-conductor-db-sync-f75mj\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.052114 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.616698 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f75mj"] Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.852720 4909 generic.go:334] "Generic (PLEG): container finished" podID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerID="87c3eb1a8e998586487a23e05591adebf7a28f64c15f5f300ef14bc6f7e03672" exitCode=0 Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.852825 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" event={"ID":"1c23d3ad-6031-41a7-89cd-f9b863351cc1","Type":"ContainerDied","Data":"87c3eb1a8e998586487a23e05591adebf7a28f64c15f5f300ef14bc6f7e03672"} Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.865713 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f75mj" event={"ID":"93ff38ac-f623-4932-aaa4-fde31d11a4ed","Type":"ContainerStarted","Data":"9c3938c6d7392a0f01c5953f03f6a52aea6f73f194f9c47ea1858c4c65e8e5d0"} Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.911134 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:34:19 crc kubenswrapper[4909]: I1128 16:34:19.911419 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:34:20 crc kubenswrapper[4909]: I1128 16:34:20.882328 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f75mj" event={"ID":"93ff38ac-f623-4932-aaa4-fde31d11a4ed","Type":"ContainerStarted","Data":"b74657ce0e6f1d6aebf3eb081305b8dbfaf43ab222ea5e2a386baad9203784be"} Nov 28 16:34:20 crc kubenswrapper[4909]: I1128 16:34:20.902895 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-f75mj" podStartSLOduration=2.902877735 podStartE2EDuration="2.902877735s" podCreationTimestamp="2025-11-28 16:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:20.902128265 +0000 UTC m=+1443.298812789" watchObservedRunningTime="2025-11-28 16:34:20.902877735 +0000 UTC m=+1443.299562259" Nov 28 16:34:21 crc kubenswrapper[4909]: I1128 16:34:21.191137 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:21 crc kubenswrapper[4909]: I1128 16:34:21.206514 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:22 crc kubenswrapper[4909]: I1128 16:34:22.833978 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:22 crc kubenswrapper[4909]: I1128 16:34:22.834628 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:22 crc kubenswrapper[4909]: I1128 16:34:22.903304 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.867136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerStarted","Data":"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8"} Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.867787 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.870514 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerStarted","Data":"533ddd4645f6a03848e6e0945714cb59a3d65f833d10c4bff08a109a1b038af9"} Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.909316 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.974406758 podStartE2EDuration="9.90929365s" podCreationTimestamp="2025-11-28 16:34:14 +0000 UTC" firstStartedPulling="2025-11-28 16:34:15.531637061 +0000 UTC m=+1437.928321585" lastFinishedPulling="2025-11-28 16:34:23.466523943 +0000 UTC m=+1445.863208477" observedRunningTime="2025-11-28 16:34:23.891235485 +0000 UTC m=+1446.287920029" watchObservedRunningTime="2025-11-28 16:34:23.90929365 +0000 UTC m=+1446.305978194" Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.933884 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:23 crc kubenswrapper[4909]: I1128 16:34:23.994648 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.895902 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerStarted","Data":"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.896198 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerStarted","Data":"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.896898 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-log" containerID="cri-o://ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" gracePeriod=30 Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.896890 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-metadata" containerID="cri-o://f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" gracePeriod=30 Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.906613 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerStarted","Data":"4617c6e4c5b2d05619a3bc749a1a471604f4d682ff6d15728202c61c72625a38"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.922722 4909 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b94e81c7-38f5-47c0-b6f7-7624101ef17b","Type":"ContainerStarted","Data":"5c1abe607c19c7135c249b2516147df7ded4fcccb8eff97993dd74c4f627e143"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.923461 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5c1abe607c19c7135c249b2516147df7ded4fcccb8eff97993dd74c4f627e143" gracePeriod=30 Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.925054 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2d385d4c-d982-4d91-817f-f21d879b187d","Type":"ContainerStarted","Data":"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.926551 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.014802492 podStartE2EDuration="8.92653236s" podCreationTimestamp="2025-11-28 16:34:16 +0000 UTC" firstStartedPulling="2025-11-28 16:34:18.596465174 +0000 UTC m=+1440.993149698" lastFinishedPulling="2025-11-28 16:34:23.508195042 +0000 UTC m=+1445.904879566" observedRunningTime="2025-11-28 16:34:24.921634769 +0000 UTC m=+1447.318319303" watchObservedRunningTime="2025-11-28 16:34:24.92653236 +0000 UTC m=+1447.323216884" Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.930846 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" event={"ID":"1c23d3ad-6031-41a7-89cd-f9b863351cc1","Type":"ContainerStarted","Data":"0d48820f5d21bc197eb801dacd5dc3118d806007250e25469dbf27b1b1789b47"} Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.931030 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.958080 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.284044163 podStartE2EDuration="8.958054317s" podCreationTimestamp="2025-11-28 16:34:16 +0000 UTC" firstStartedPulling="2025-11-28 16:34:17.834416194 +0000 UTC m=+1440.231100718" lastFinishedPulling="2025-11-28 16:34:23.508426348 +0000 UTC m=+1445.905110872" observedRunningTime="2025-11-28 16:34:24.942459218 +0000 UTC m=+1447.339143742" watchObservedRunningTime="2025-11-28 16:34:24.958054317 +0000 UTC m=+1447.354738841" Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.974160 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" podStartSLOduration=7.9741131880000005 podStartE2EDuration="7.974113188s" podCreationTimestamp="2025-11-28 16:34:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:24.965157777 +0000 UTC m=+1447.361842301" watchObservedRunningTime="2025-11-28 16:34:24.974113188 +0000 UTC m=+1447.370797712" Nov 28 16:34:24 crc kubenswrapper[4909]: I1128 16:34:24.989015 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.807346273 podStartE2EDuration="8.988993907s" podCreationTimestamp="2025-11-28 16:34:16 +0000 UTC" firstStartedPulling="2025-11-28 16:34:18.322764106 +0000 UTC m=+1440.719448630" 
lastFinishedPulling="2025-11-28 16:34:23.50441174 +0000 UTC m=+1445.901096264" observedRunningTime="2025-11-28 16:34:24.980708865 +0000 UTC m=+1447.377393429" watchObservedRunningTime="2025-11-28 16:34:24.988993907 +0000 UTC m=+1447.385678431" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.004639 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.229352862 podStartE2EDuration="8.004617427s" podCreationTimestamp="2025-11-28 16:34:17 +0000 UTC" firstStartedPulling="2025-11-28 16:34:18.733249566 +0000 UTC m=+1441.129934100" lastFinishedPulling="2025-11-28 16:34:23.508514141 +0000 UTC m=+1445.905198665" observedRunningTime="2025-11-28 16:34:25.00177224 +0000 UTC m=+1447.398456764" watchObservedRunningTime="2025-11-28 16:34:25.004617427 +0000 UTC m=+1447.401301951" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.682966 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.851434 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data\") pod \"43057ca4-f849-409a-9b27-810bd9343936\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.851581 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6mv7\" (UniqueName: \"kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7\") pod \"43057ca4-f849-409a-9b27-810bd9343936\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.851641 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle\") pod \"43057ca4-f849-409a-9b27-810bd9343936\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.851730 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs\") pod \"43057ca4-f849-409a-9b27-810bd9343936\" (UID: \"43057ca4-f849-409a-9b27-810bd9343936\") " Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.852073 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs" (OuterVolumeSpecName: "logs") pod "43057ca4-f849-409a-9b27-810bd9343936" (UID: "43057ca4-f849-409a-9b27-810bd9343936"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.852351 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43057ca4-f849-409a-9b27-810bd9343936-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.859787 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7" (OuterVolumeSpecName: "kube-api-access-d6mv7") pod "43057ca4-f849-409a-9b27-810bd9343936" (UID: "43057ca4-f849-409a-9b27-810bd9343936"). InnerVolumeSpecName "kube-api-access-d6mv7". 
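
[annotation] The pod_startup_latency_tracker entries above make the SLO arithmetic visible. For openstack/nova-cell0-cell-mapping-fn62x nothing was pulled (both pull timestamps are the zero time), so podStartSLOduration is exactly watchObservedRunningTime minus podCreationTimestamp; for openstack/ceilometer-0 the roughly 7.93s between firstStartedPulling and lastFinishedPulling is excluded, which is why its SLO duration (≈1.97s) sits far below its E2E duration (≈9.91s). A quick stdlib check of the zero-pull case, with timestamps copied from the 16:34:18.838 entry:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Values from the nova-cell0-cell-mapping-fn62x entry above.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, err := time.Parse(layout, "2025-11-28 16:34:16 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	observed, err := time.Parse(layout, "2025-11-28 16:34:18.838695177 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	// Prints 2.838695177s, matching podStartSLOduration in the log.
    	fmt.Printf("podStartSLOduration=%.9fs\n", observed.Sub(created).Seconds())
    }
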
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.884800 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data" (OuterVolumeSpecName: "config-data") pod "43057ca4-f849-409a-9b27-810bd9343936" (UID: "43057ca4-f849-409a-9b27-810bd9343936"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.892758 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43057ca4-f849-409a-9b27-810bd9343936" (UID: "43057ca4-f849-409a-9b27-810bd9343936"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.939174 4909 generic.go:334] "Generic (PLEG): container finished" podID="43057ca4-f849-409a-9b27-810bd9343936" containerID="f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" exitCode=0 Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.939205 4909 generic.go:334] "Generic (PLEG): container finished" podID="43057ca4-f849-409a-9b27-810bd9343936" containerID="ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" exitCode=143 Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.940047 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.941083 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerDied","Data":"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d"} Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.941141 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerDied","Data":"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795"} Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.941156 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43057ca4-f849-409a-9b27-810bd9343936","Type":"ContainerDied","Data":"47be0d04a0f1a1109b47c39249d6603d370c54687f262f537cc2cc5b8866f311"} Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.941175 4909 scope.go:117] "RemoveContainer" containerID="f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.941613 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lc22m" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="registry-server" containerID="cri-o://924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335" gracePeriod=2 Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.961072 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.961102 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6mv7\" (UniqueName: 
\"kubernetes.io/projected/43057ca4-f849-409a-9b27-810bd9343936-kube-api-access-d6mv7\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.961114 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43057ca4-f849-409a-9b27-810bd9343936-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:25 crc kubenswrapper[4909]: I1128 16:34:25.978865 4909 scope.go:117] "RemoveContainer" containerID="ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.003913 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.021727 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.031726 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:26 crc kubenswrapper[4909]: E1128 16:34:26.032303 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-log" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.032327 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-log" Nov 28 16:34:26 crc kubenswrapper[4909]: E1128 16:34:26.032363 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-metadata" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.032373 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-metadata" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.032593 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-log" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.032613 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="43057ca4-f849-409a-9b27-810bd9343936" containerName="nova-metadata-metadata" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.033921 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.041865 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.042293 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.042809 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.062548 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6njmr\" (UniqueName: \"kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.062612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.062644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.062766 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.062850 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.094878 4909 scope.go:117] "RemoveContainer" containerID="f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" Nov 28 16:34:26 crc kubenswrapper[4909]: E1128 16:34:26.095305 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d\": container with ID starting with f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d not found: ID does not exist" containerID="f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095332 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d"} err="failed to get container status \"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d\": rpc error: code = NotFound desc = could not find container 
\"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d\": container with ID starting with f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d not found: ID does not exist" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095350 4909 scope.go:117] "RemoveContainer" containerID="ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" Nov 28 16:34:26 crc kubenswrapper[4909]: E1128 16:34:26.095545 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795\": container with ID starting with ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795 not found: ID does not exist" containerID="ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095558 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795"} err="failed to get container status \"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795\": rpc error: code = NotFound desc = could not find container \"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795\": container with ID starting with ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795 not found: ID does not exist" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095569 4909 scope.go:117] "RemoveContainer" containerID="f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095742 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d"} err="failed to get container status \"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d\": rpc error: code = NotFound desc = could not find container \"f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d\": container with ID starting with f86a1d0d322d35d86504f00d3dd24e46c97955d98a133004e6b64ba7f88c114d not found: ID does not exist" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095755 4909 scope.go:117] "RemoveContainer" containerID="ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.095928 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795"} err="failed to get container status \"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795\": rpc error: code = NotFound desc = could not find container \"ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795\": container with ID starting with ba94e99c50abaac862f2a9fedc12bef106f5c940903d038749365aefd0b63795 not found: ID does not exist" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.164452 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.164618 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6njmr\" (UniqueName: 
\"kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.165030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.165066 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.165146 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.166369 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.169846 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.170378 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.176245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.185615 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6njmr\" (UniqueName: \"kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr\") pod \"nova-metadata-0\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.386932 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.389639 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.574317 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities\") pod \"9b6c3766-5116-4179-aae7-17f9564115c0\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.574609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgqf8\" (UniqueName: \"kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8\") pod \"9b6c3766-5116-4179-aae7-17f9564115c0\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.574676 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content\") pod \"9b6c3766-5116-4179-aae7-17f9564115c0\" (UID: \"9b6c3766-5116-4179-aae7-17f9564115c0\") " Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.575334 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities" (OuterVolumeSpecName: "utilities") pod "9b6c3766-5116-4179-aae7-17f9564115c0" (UID: "9b6c3766-5116-4179-aae7-17f9564115c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.594839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8" (OuterVolumeSpecName: "kube-api-access-bgqf8") pod "9b6c3766-5116-4179-aae7-17f9564115c0" (UID: "9b6c3766-5116-4179-aae7-17f9564115c0"). InnerVolumeSpecName "kube-api-access-bgqf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.666221 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b6c3766-5116-4179-aae7-17f9564115c0" (UID: "9b6c3766-5116-4179-aae7-17f9564115c0"). InnerVolumeSpecName "catalog-content". 
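
[annotation] The 16:34:24 "Killing container with a grace period" entries above (gracePeriod=30 for the old nova-metadata-0 containers, gracePeriod=2 for the marketplace registry) follow the usual TERM-then-KILL contract, and the 16:34:25 exit codes confirm it: nova-metadata-metadata shut down cleanly (exitCode=0) while nova-metadata-log died on the signal (exitCode=143, i.e. 128+SIGTERM). A toy Unix-only model of that contract; the real kill is issued through CRI-O, and "sleep" merely stands in for a container's main process:

    package main

    import (
    	"os/exec"
    	"syscall"
    	"time"
    )

    func main() {
    	cmd := exec.Command("sleep", "300") // stand-in for the container process
    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()

    	_ = cmd.Process.Signal(syscall.SIGTERM) // polite phase of the kill
    	select {
    	case <-done:
    		// Exited within the grace period; a process terminated by SIGTERM
    		// reports exit status 128+15=143, as seen for nova-metadata-log.
    	case <-time.After(30 * time.Second): // gracePeriod from the log
    		_ = cmd.Process.Kill() // grace period exhausted: SIGKILL
    		<-done
    	}
    }
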
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.683024 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.683062 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgqf8\" (UniqueName: \"kubernetes.io/projected/9b6c3766-5116-4179-aae7-17f9564115c0-kube-api-access-bgqf8\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.683073 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6c3766-5116-4179-aae7-17f9564115c0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.953228 4909 generic.go:334] "Generic (PLEG): container finished" podID="9b6c3766-5116-4179-aae7-17f9564115c0" containerID="924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335" exitCode=0 Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.953274 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerDied","Data":"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335"} Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.953308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lc22m" event={"ID":"9b6c3766-5116-4179-aae7-17f9564115c0","Type":"ContainerDied","Data":"f4a5af0dae7a54b8be7d541d46d9ca4b7fd667001eddee9f2a1505f11f89f7a8"} Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.953343 4909 util.go:48] "No ready sandbox for pod can be found. 
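
[annotation] The paired log.go:32 / pod_container_deletor.go:53 entries at 16:34:26.095 above are a benign race on container cleanup: by the time kubelet retries RemoveContainer, CRI-O has already forgotten the ID, so the runtime answers with gRPC NotFound and the deletor just logs the error and moves on. A sketch of the status-code test that makes such cleanup idempotent; the helper name is made up, and the error text is abbreviated from the log:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // alreadyRemoved reports whether a runtime error just means the container
    // is gone, in which case deletion can be treated as already done.
    func alreadyRemoved(err error) bool {
    	return status.Code(err) == codes.NotFound
    }

    func main() {
    	err := status.Error(codes.NotFound,
    		`could not find container "f86a1d0d...": ID does not exist`)
    	fmt.Println(alreadyRemoved(err)) // true: safe to skip the removal
    }
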
Need to start a new one" pod="openshift-marketplace/certified-operators-lc22m" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.953341 4909 scope.go:117] "RemoveContainer" containerID="924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335" Nov 28 16:34:26 crc kubenswrapper[4909]: I1128 16:34:26.992837 4909 scope.go:117] "RemoveContainer" containerID="67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.006409 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.016037 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lc22m"] Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.025914 4909 scope.go:117] "RemoveContainer" containerID="7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.066453 4909 scope.go:117] "RemoveContainer" containerID="924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335" Nov 28 16:34:27 crc kubenswrapper[4909]: E1128 16:34:27.068702 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335\": container with ID starting with 924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335 not found: ID does not exist" containerID="924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.068750 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335"} err="failed to get container status \"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335\": rpc error: code = NotFound desc = could not find container \"924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335\": container with ID starting with 924581b5519ed51dbb5bff40fa9f5672250e24b09e8125e170e9230cb63b1335 not found: ID does not exist" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.068780 4909 scope.go:117] "RemoveContainer" containerID="67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf" Nov 28 16:34:27 crc kubenswrapper[4909]: E1128 16:34:27.069087 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf\": container with ID starting with 67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf not found: ID does not exist" containerID="67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.069109 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf"} err="failed to get container status \"67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf\": rpc error: code = NotFound desc = could not find container \"67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf\": container with ID starting with 67e0f1c681dafb9f44527d9c932c6bbaf1e769e001c149ff04552c7d18d55fbf not found: ID does not exist" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.069124 4909 scope.go:117] "RemoveContainer" 
containerID="7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3" Nov 28 16:34:27 crc kubenswrapper[4909]: E1128 16:34:27.069378 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3\": container with ID starting with 7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3 not found: ID does not exist" containerID="7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.069400 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3"} err="failed to get container status \"7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3\": rpc error: code = NotFound desc = could not find container \"7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3\": container with ID starting with 7d42a28246fae2657fd4cb276d094f8e58a75c8280c1eca1f559b3a78ea22dd3 not found: ID does not exist" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.079209 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.303777 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.303840 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.537168 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.606808 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.606854 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.634758 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.915784 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43057ca4-f849-409a-9b27-810bd9343936" path="/var/lib/kubelet/pods/43057ca4-f849-409a-9b27-810bd9343936/volumes" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.916553 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" path="/var/lib/kubelet/pods/9b6c3766-5116-4179-aae7-17f9564115c0/volumes" Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.962386 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerStarted","Data":"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265"} Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.962438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerStarted","Data":"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2"} Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.962450 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerStarted","Data":"7c708f5954462b69290926827fab370355e366ad2338d9b071de7f2a16672f1f"} Nov 28 16:34:27 crc kubenswrapper[4909]: I1128 16:34:27.992831 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.992813612 podStartE2EDuration="2.992813612s" podCreationTimestamp="2025-11-28 16:34:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:27.986102282 +0000 UTC m=+1450.382786806" watchObservedRunningTime="2025-11-28 16:34:27.992813612 +0000 UTC m=+1450.389498136" Nov 28 16:34:28 crc kubenswrapper[4909]: I1128 16:34:28.005571 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:34:28 crc kubenswrapper[4909]: I1128 16:34:28.387842 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:28 crc kubenswrapper[4909]: I1128 16:34:28.387893 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:28 crc kubenswrapper[4909]: I1128 16:34:28.975747 4909 generic.go:334] "Generic (PLEG): container finished" podID="b69a490a-8b2d-405f-b41e-903268d44cca" containerID="ce2426fd0deaef85cce4c6ed203e672ca1bccd2feef4b79a7cc06bd0d0e935c9" exitCode=0 Nov 28 16:34:28 crc kubenswrapper[4909]: I1128 16:34:28.975830 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fn62x" event={"ID":"b69a490a-8b2d-405f-b41e-903268d44cca","Type":"ContainerDied","Data":"ce2426fd0deaef85cce4c6ed203e672ca1bccd2feef4b79a7cc06bd0d0e935c9"} Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.425231 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.461415 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle\") pod \"b69a490a-8b2d-405f-b41e-903268d44cca\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.461583 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data\") pod \"b69a490a-8b2d-405f-b41e-903268d44cca\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.461838 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts\") pod \"b69a490a-8b2d-405f-b41e-903268d44cca\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.461912 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmwmx\" (UniqueName: \"kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx\") pod \"b69a490a-8b2d-405f-b41e-903268d44cca\" (UID: \"b69a490a-8b2d-405f-b41e-903268d44cca\") " Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.485950 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx" (OuterVolumeSpecName: "kube-api-access-dmwmx") pod "b69a490a-8b2d-405f-b41e-903268d44cca" (UID: "b69a490a-8b2d-405f-b41e-903268d44cca"). InnerVolumeSpecName "kube-api-access-dmwmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.486090 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts" (OuterVolumeSpecName: "scripts") pod "b69a490a-8b2d-405f-b41e-903268d44cca" (UID: "b69a490a-8b2d-405f-b41e-903268d44cca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.500144 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b69a490a-8b2d-405f-b41e-903268d44cca" (UID: "b69a490a-8b2d-405f-b41e-903268d44cca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.515320 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data" (OuterVolumeSpecName: "config-data") pod "b69a490a-8b2d-405f-b41e-903268d44cca" (UID: "b69a490a-8b2d-405f-b41e-903268d44cca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.563691 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.563727 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmwmx\" (UniqueName: \"kubernetes.io/projected/b69a490a-8b2d-405f-b41e-903268d44cca-kube-api-access-dmwmx\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.563740 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.563751 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b69a490a-8b2d-405f-b41e-903268d44cca-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.995359 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fn62x" event={"ID":"b69a490a-8b2d-405f-b41e-903268d44cca","Type":"ContainerDied","Data":"bd42249a773de9330a491b8acd0cfac8b5fd43e4c1a56e4840bd7c7d401a5744"} Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.995679 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd42249a773de9330a491b8acd0cfac8b5fd43e4c1a56e4840bd7c7d401a5744" Nov 28 16:34:30 crc kubenswrapper[4909]: I1128 16:34:30.995424 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fn62x" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.172523 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.172953 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-api" containerID="cri-o://4617c6e4c5b2d05619a3bc749a1a471604f4d682ff6d15728202c61c72625a38" gracePeriod=30 Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.172992 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-log" containerID="cri-o://533ddd4645f6a03848e6e0945714cb59a3d65f833d10c4bff08a109a1b038af9" gracePeriod=30 Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.189308 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.189507 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" containerName="nova-scheduler-scheduler" containerID="cri-o://2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" gracePeriod=30 Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.206464 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.206681 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" 
containerName="nova-metadata-log" containerID="cri-o://61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" gracePeriod=30 Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.206787 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-metadata" containerID="cri-o://ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" gracePeriod=30 Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.388104 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.388158 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.826677 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.885200 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6njmr\" (UniqueName: \"kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr\") pod \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.885295 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle\") pod \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.885322 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data\") pod \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.885387 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs\") pod \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.885486 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs\") pod \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\" (UID: \"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39\") " Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.886167 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs" (OuterVolumeSpecName: "logs") pod "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" (UID: "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.894994 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr" (OuterVolumeSpecName: "kube-api-access-6njmr") pod "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" (UID: "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39"). 
InnerVolumeSpecName "kube-api-access-6njmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.927007 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data" (OuterVolumeSpecName: "config-data") pod "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" (UID: "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.933364 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" (UID: "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.966339 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" (UID: "ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.987633 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.987695 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6njmr\" (UniqueName: \"kubernetes.io/projected/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-kube-api-access-6njmr\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.987706 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.987715 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:31 crc kubenswrapper[4909]: I1128 16:34:31.987724 4909 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005368 4909 generic.go:334] "Generic (PLEG): container finished" podID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerID="ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" exitCode=0 Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005414 4909 generic.go:334] "Generic (PLEG): container finished" podID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerID="61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" exitCode=143 Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005444 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerDied","Data":"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265"} Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005491 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerDied","Data":"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2"} Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005504 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39","Type":"ContainerDied","Data":"7c708f5954462b69290926827fab370355e366ad2338d9b071de7f2a16672f1f"} Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005506 4909 scope.go:117] "RemoveContainer" containerID="ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.005414 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.007370 4909 generic.go:334] "Generic (PLEG): container finished" podID="93ff38ac-f623-4932-aaa4-fde31d11a4ed" containerID="b74657ce0e6f1d6aebf3eb081305b8dbfaf43ab222ea5e2a386baad9203784be" exitCode=0 Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.007425 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f75mj" event={"ID":"93ff38ac-f623-4932-aaa4-fde31d11a4ed","Type":"ContainerDied","Data":"b74657ce0e6f1d6aebf3eb081305b8dbfaf43ab222ea5e2a386baad9203784be"} Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.010454 4909 generic.go:334] "Generic (PLEG): container finished" podID="2287c28b-bb92-4e34-9ca4-a31222838904" containerID="533ddd4645f6a03848e6e0945714cb59a3d65f833d10c4bff08a109a1b038af9" exitCode=143 Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.010482 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerDied","Data":"533ddd4645f6a03848e6e0945714cb59a3d65f833d10c4bff08a109a1b038af9"} Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.040830 4909 scope.go:117] "RemoveContainer" containerID="61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.051157 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.069938 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.102336 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.102876 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="extract-utilities" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.102901 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="extract-utilities" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.102919 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-metadata" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.102929 4909 
state_mem.go:107] "Deleted CPUSet assignment" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-metadata" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.102951 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="registry-server" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.102959 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="registry-server" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.102974 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b69a490a-8b2d-405f-b41e-903268d44cca" containerName="nova-manage" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.102981 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b69a490a-8b2d-405f-b41e-903268d44cca" containerName="nova-manage" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.103001 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-log" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103009 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-log" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.103030 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="extract-content" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103061 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="extract-content" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103295 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b69a490a-8b2d-405f-b41e-903268d44cca" containerName="nova-manage" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103310 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-metadata" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103341 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" containerName="nova-metadata-log" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.103355 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b6c3766-5116-4179-aae7-17f9564115c0" containerName="registry-server" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.104501 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.108204 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.108416 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.109833 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.117969 4909 scope.go:117] "RemoveContainer" containerID="ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.118678 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265\": container with ID starting with ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265 not found: ID does not exist" containerID="ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.118711 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265"} err="failed to get container status \"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265\": rpc error: code = NotFound desc = could not find container \"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265\": container with ID starting with ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265 not found: ID does not exist" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.118733 4909 scope.go:117] "RemoveContainer" containerID="61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.122341 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2\": container with ID starting with 61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2 not found: ID does not exist" containerID="61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.122396 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2"} err="failed to get container status \"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2\": rpc error: code = NotFound desc = could not find container \"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2\": container with ID starting with 61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2 not found: ID does not exist" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.122427 4909 scope.go:117] "RemoveContainer" containerID="ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.123235 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265"} err="failed to get container status \"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265\": rpc error: 
code = NotFound desc = could not find container \"ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265\": container with ID starting with ef9d3ff12f0af1e1e0311f2fccff4806b4ac618ef71b94bb25137dffb8083265 not found: ID does not exist" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.123267 4909 scope.go:117] "RemoveContainer" containerID="61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.123486 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2"} err="failed to get container status \"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2\": rpc error: code = NotFound desc = could not find container \"61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2\": container with ID starting with 61121eb242dd15f4896fb597d8f71e979b60af364817f62031ebcfd55b24abe2 not found: ID does not exist" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.192008 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.192112 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r249\" (UniqueName: \"kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.192149 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.192262 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.192385 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.301267 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r249\" (UniqueName: \"kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.301332 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.301367 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.301416 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.301508 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.304013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.307003 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.307086 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.309069 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.357935 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r249\" (UniqueName: \"kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249\") pod \"nova-metadata-0\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.430181 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.608084 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa is running failed: container process not found" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.613619 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa is running failed: container process not found" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.614436 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa is running failed: container process not found" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:34:32 crc kubenswrapper[4909]: E1128 16:34:32.614515 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" containerName="nova-scheduler-scheduler" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.650888 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.715339 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.715626 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="dnsmasq-dns" containerID="cri-o://ad4aa083ac971ebecec5bf41839833e063ceb2c80a55ad2e0be49f895214262f" gracePeriod=10 Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.927377 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:32 crc kubenswrapper[4909]: I1128 16:34:32.966837 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.019461 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle\") pod \"2d385d4c-d982-4d91-817f-f21d879b187d\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.019629 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkxmb\" (UniqueName: \"kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb\") pod \"2d385d4c-d982-4d91-817f-f21d879b187d\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.019713 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data\") pod \"2d385d4c-d982-4d91-817f-f21d879b187d\" (UID: \"2d385d4c-d982-4d91-817f-f21d879b187d\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.023952 4909 generic.go:334] "Generic (PLEG): container finished" podID="2d385d4c-d982-4d91-817f-f21d879b187d" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" exitCode=0 Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.023989 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.024034 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2d385d4c-d982-4d91-817f-f21d879b187d","Type":"ContainerDied","Data":"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa"} Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.024086 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2d385d4c-d982-4d91-817f-f21d879b187d","Type":"ContainerDied","Data":"ef9a871bc72782d62bb29d6250306b234e704f120c173470eeb4cfd7bd32ba73"} Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.024110 4909 scope.go:117] "RemoveContainer" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.028810 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb" (OuterVolumeSpecName: "kube-api-access-wkxmb") pod "2d385d4c-d982-4d91-817f-f21d879b187d" (UID: "2d385d4c-d982-4d91-817f-f21d879b187d"). InnerVolumeSpecName "kube-api-access-wkxmb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.035475 4909 generic.go:334] "Generic (PLEG): container finished" podID="96ca616f-e86a-4274-8bef-da335a23e12c" containerID="ad4aa083ac971ebecec5bf41839833e063ceb2c80a55ad2e0be49f895214262f" exitCode=0 Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.035572 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" event={"ID":"96ca616f-e86a-4274-8bef-da335a23e12c","Type":"ContainerDied","Data":"ad4aa083ac971ebecec5bf41839833e063ceb2c80a55ad2e0be49f895214262f"} Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.045346 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerStarted","Data":"850453a2da71db65946c9c34844bc0abb3160e2bbec7fa09176c17adc7d090d3"} Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.054630 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data" (OuterVolumeSpecName: "config-data") pod "2d385d4c-d982-4d91-817f-f21d879b187d" (UID: "2d385d4c-d982-4d91-817f-f21d879b187d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.111981 4909 scope.go:117] "RemoveContainer" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.113420 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d385d4c-d982-4d91-817f-f21d879b187d" (UID: "2d385d4c-d982-4d91-817f-f21d879b187d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: E1128 16:34:33.114254 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa\": container with ID starting with 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa not found: ID does not exist" containerID="2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.114390 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa"} err="failed to get container status \"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa\": rpc error: code = NotFound desc = could not find container \"2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa\": container with ID starting with 2235b890424a297fe76c3bb587c2d413eb9da4efdc9dd1010816cd22052d28aa not found: ID does not exist" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.121995 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.122030 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d385d4c-d982-4d91-817f-f21d879b187d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.122043 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkxmb\" (UniqueName: \"kubernetes.io/projected/2d385d4c-d982-4d91-817f-f21d879b187d-kube-api-access-wkxmb\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.190898 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223256 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223596 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223622 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zl9h\" (UniqueName: \"kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223770 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223822 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.223958 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc\") pod \"96ca616f-e86a-4274-8bef-da335a23e12c\" (UID: \"96ca616f-e86a-4274-8bef-da335a23e12c\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.229179 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h" (OuterVolumeSpecName: "kube-api-access-9zl9h") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "kube-api-access-9zl9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.328578 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zl9h\" (UniqueName: \"kubernetes.io/projected/96ca616f-e86a-4274-8bef-da335a23e12c-kube-api-access-9zl9h\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.344852 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.346408 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.363460 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config" (OuterVolumeSpecName: "config") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.421155 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.431201 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.431447 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.431538 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.431672 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.441317 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "96ca616f-e86a-4274-8bef-da335a23e12c" (UID: "96ca616f-e86a-4274-8bef-da335a23e12c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.533398 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96ca616f-e86a-4274-8bef-da335a23e12c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.569765 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.584923 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.605780 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.630433 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:33 crc kubenswrapper[4909]: E1128 16:34:33.630947 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="init" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.630969 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="init" Nov 28 16:34:33 crc kubenswrapper[4909]: E1128 16:34:33.630992 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93ff38ac-f623-4932-aaa4-fde31d11a4ed" containerName="nova-cell1-conductor-db-sync" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631001 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="93ff38ac-f623-4932-aaa4-fde31d11a4ed" containerName="nova-cell1-conductor-db-sync" Nov 28 16:34:33 crc kubenswrapper[4909]: E1128 16:34:33.631020 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="dnsmasq-dns" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631027 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="dnsmasq-dns" Nov 28 16:34:33 crc kubenswrapper[4909]: E1128 16:34:33.631055 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" containerName="nova-scheduler-scheduler" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631062 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" containerName="nova-scheduler-scheduler" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631297 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" containerName="nova-scheduler-scheduler" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631322 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="dnsmasq-dns" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.631349 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="93ff38ac-f623-4932-aaa4-fde31d11a4ed" containerName="nova-cell1-conductor-db-sync" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.632094 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.633696 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634130 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data\") pod \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634209 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle\") pod \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634244 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts\") pod \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634324 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5np4r\" (UniqueName: \"kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r\") pod \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\" (UID: \"93ff38ac-f623-4932-aaa4-fde31d11a4ed\") " Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634591 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634709 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.634758 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb96h\" (UniqueName: \"kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.638000 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts" (OuterVolumeSpecName: "scripts") pod "93ff38ac-f623-4932-aaa4-fde31d11a4ed" (UID: "93ff38ac-f623-4932-aaa4-fde31d11a4ed"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.638962 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.639560 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r" (OuterVolumeSpecName: "kube-api-access-5np4r") pod "93ff38ac-f623-4932-aaa4-fde31d11a4ed" (UID: "93ff38ac-f623-4932-aaa4-fde31d11a4ed"). InnerVolumeSpecName "kube-api-access-5np4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.673270 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data" (OuterVolumeSpecName: "config-data") pod "93ff38ac-f623-4932-aaa4-fde31d11a4ed" (UID: "93ff38ac-f623-4932-aaa4-fde31d11a4ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.704692 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93ff38ac-f623-4932-aaa4-fde31d11a4ed" (UID: "93ff38ac-f623-4932-aaa4-fde31d11a4ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.736117 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.736859 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb96h\" (UniqueName: \"kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.737008 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.738430 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.738452 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.738465 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5np4r\" (UniqueName: \"kubernetes.io/projected/93ff38ac-f623-4932-aaa4-fde31d11a4ed-kube-api-access-5np4r\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.738477 4909 reconciler_common.go:293] 
"Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93ff38ac-f623-4932-aaa4-fde31d11a4ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.740959 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.741050 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.755546 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb96h\" (UniqueName: \"kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h\") pod \"nova-scheduler-0\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.912889 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d385d4c-d982-4d91-817f-f21d879b187d" path="/var/lib/kubelet/pods/2d385d4c-d982-4d91-817f-f21d879b187d/volumes" Nov 28 16:34:33 crc kubenswrapper[4909]: I1128 16:34:33.914193 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39" path="/var/lib/kubelet/pods/ba05ad6a-6e38-4c1e-b1ef-a38626a5ea39/volumes" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.042675 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.057798 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" event={"ID":"96ca616f-e86a-4274-8bef-da335a23e12c","Type":"ContainerDied","Data":"d56ebfd2c561b84e64bd05e2dca3f294a89009e11c1f056777865d5a9b793367"} Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.057855 4909 scope.go:117] "RemoveContainer" containerID="ad4aa083ac971ebecec5bf41839833e063ceb2c80a55ad2e0be49f895214262f" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.057979 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.060783 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f75mj" event={"ID":"93ff38ac-f623-4932-aaa4-fde31d11a4ed","Type":"ContainerDied","Data":"9c3938c6d7392a0f01c5953f03f6a52aea6f73f194f9c47ea1858c4c65e8e5d0"} Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.060817 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c3938c6d7392a0f01c5953f03f6a52aea6f73f194f9c47ea1858c4c65e8e5d0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.060868 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f75mj" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.068372 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerStarted","Data":"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4"} Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.068411 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerStarted","Data":"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072"} Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.085868 4909 scope.go:117] "RemoveContainer" containerID="7502fa8c34f56280ee4bedf0f0fe8c08cfe511501d8ba742a03861959af17b0a" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.088909 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.102073 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-r4b99"] Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.124160 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.125507 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.129119 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.131318 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.131300966 podStartE2EDuration="2.131300966s" podCreationTimestamp="2025-11-28 16:34:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:34.106270194 +0000 UTC m=+1456.502954728" watchObservedRunningTime="2025-11-28 16:34:34.131300966 +0000 UTC m=+1456.527985490" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.139867 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.144826 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.144995 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvdkl\" (UniqueName: \"kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.145017 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data\") pod \"nova-cell1-conductor-0\" (UID: 
\"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.246341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.246761 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvdkl\" (UniqueName: \"kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.246788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.251806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.252215 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.265081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvdkl\" (UniqueName: \"kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl\") pod \"nova-cell1-conductor-0\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.448906 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.584744 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:34 crc kubenswrapper[4909]: W1128 16:34:34.602873 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3611cfd4_bf6b_41f0_8643_f8cb7f69a68c.slice/crio-1c6f7892d95d67c8bb8028cb7786323eda23e3100fe033ab99e22782d68698da WatchSource:0}: Error finding container 1c6f7892d95d67c8bb8028cb7786323eda23e3100fe033ab99e22782d68698da: Status 404 returned error can't find the container with id 1c6f7892d95d67c8bb8028cb7786323eda23e3100fe033ab99e22782d68698da Nov 28 16:34:34 crc kubenswrapper[4909]: I1128 16:34:34.898967 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:34:34 crc kubenswrapper[4909]: W1128 16:34:34.904704 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2ea37c8_3213_4043_9da2_a9e76f9284e4.slice/crio-78ad3448d2834c66fa31516dc972b1d20a281375bf506a32666ec4282e9e84f6 WatchSource:0}: Error finding container 78ad3448d2834c66fa31516dc972b1d20a281375bf506a32666ec4282e9e84f6: Status 404 returned error can't find the container with id 78ad3448d2834c66fa31516dc972b1d20a281375bf506a32666ec4282e9e84f6 Nov 28 16:34:35 crc kubenswrapper[4909]: I1128 16:34:35.079433 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c","Type":"ContainerStarted","Data":"1c6f7892d95d67c8bb8028cb7786323eda23e3100fe033ab99e22782d68698da"} Nov 28 16:34:35 crc kubenswrapper[4909]: I1128 16:34:35.083882 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2ea37c8-3213-4043-9da2-a9e76f9284e4","Type":"ContainerStarted","Data":"78ad3448d2834c66fa31516dc972b1d20a281375bf506a32666ec4282e9e84f6"} Nov 28 16:34:35 crc kubenswrapper[4909]: I1128 16:34:35.086471 4909 generic.go:334] "Generic (PLEG): container finished" podID="2287c28b-bb92-4e34-9ca4-a31222838904" containerID="4617c6e4c5b2d05619a3bc749a1a471604f4d682ff6d15728202c61c72625a38" exitCode=0 Nov 28 16:34:35 crc kubenswrapper[4909]: I1128 16:34:35.086541 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerDied","Data":"4617c6e4c5b2d05619a3bc749a1a471604f4d682ff6d15728202c61c72625a38"} Nov 28 16:34:35 crc kubenswrapper[4909]: I1128 16:34:35.916933 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" path="/var/lib/kubelet/pods/96ca616f-e86a-4274-8bef-da335a23e12c/volumes" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.069329 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.098685 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c","Type":"ContainerStarted","Data":"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529"} Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.103091 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2ea37c8-3213-4043-9da2-a9e76f9284e4","Type":"ContainerStarted","Data":"0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4"} Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.103171 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.105900 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2287c28b-bb92-4e34-9ca4-a31222838904","Type":"ContainerDied","Data":"017d003f78858088f93bea3a24c4bc9aca35af382cc8d43cecbcc3eb1a7f06f0"} Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.105978 4909 scope.go:117] "RemoveContainer" containerID="4617c6e4c5b2d05619a3bc749a1a471604f4d682ff6d15728202c61c72625a38" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.105978 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.133066 4909 scope.go:117] "RemoveContainer" containerID="533ddd4645f6a03848e6e0945714cb59a3d65f833d10c4bff08a109a1b038af9" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.134328 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.134307251 podStartE2EDuration="3.134307251s" podCreationTimestamp="2025-11-28 16:34:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:36.125739051 +0000 UTC m=+1458.522423585" watchObservedRunningTime="2025-11-28 16:34:36.134307251 +0000 UTC m=+1458.530991775" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.159710 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.159684423 podStartE2EDuration="2.159684423s" podCreationTimestamp="2025-11-28 16:34:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:36.142860451 +0000 UTC m=+1458.539544975" watchObservedRunningTime="2025-11-28 16:34:36.159684423 +0000 UTC m=+1458.556368947" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.181585 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs\") pod \"2287c28b-bb92-4e34-9ca4-a31222838904\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.181780 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle\") pod \"2287c28b-bb92-4e34-9ca4-a31222838904\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.181821 4909 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcwj9\" (UniqueName: \"kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9\") pod \"2287c28b-bb92-4e34-9ca4-a31222838904\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.181848 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data\") pod \"2287c28b-bb92-4e34-9ca4-a31222838904\" (UID: \"2287c28b-bb92-4e34-9ca4-a31222838904\") " Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.183503 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs" (OuterVolumeSpecName: "logs") pod "2287c28b-bb92-4e34-9ca4-a31222838904" (UID: "2287c28b-bb92-4e34-9ca4-a31222838904"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.188878 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9" (OuterVolumeSpecName: "kube-api-access-jcwj9") pod "2287c28b-bb92-4e34-9ca4-a31222838904" (UID: "2287c28b-bb92-4e34-9ca4-a31222838904"). InnerVolumeSpecName "kube-api-access-jcwj9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.211873 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data" (OuterVolumeSpecName: "config-data") pod "2287c28b-bb92-4e34-9ca4-a31222838904" (UID: "2287c28b-bb92-4e34-9ca4-a31222838904"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.221144 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2287c28b-bb92-4e34-9ca4-a31222838904" (UID: "2287c28b-bb92-4e34-9ca4-a31222838904"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.284155 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2287c28b-bb92-4e34-9ca4-a31222838904-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.284186 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.284229 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcwj9\" (UniqueName: \"kubernetes.io/projected/2287c28b-bb92-4e34-9ca4-a31222838904-kube-api-access-jcwj9\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.284239 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2287c28b-bb92-4e34-9ca4-a31222838904-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.436835 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.455443 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.468405 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:36 crc kubenswrapper[4909]: E1128 16:34:36.468908 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-log" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.468925 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-log" Nov 28 16:34:36 crc kubenswrapper[4909]: E1128 16:34:36.468936 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-api" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.468943 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-api" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.469175 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-api" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.469214 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" containerName="nova-api-log" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.470411 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.472624 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.477940 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.590229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97qcv\" (UniqueName: \"kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.590294 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.590343 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.590616 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.692488 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.692600 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97qcv\" (UniqueName: \"kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.692622 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.692640 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.693112 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " 
pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.698032 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.701056 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.717761 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97qcv\" (UniqueName: \"kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv\") pod \"nova-api-0\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " pod="openstack/nova-api-0" Nov 28 16:34:36 crc kubenswrapper[4909]: I1128 16:34:36.795324 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:37 crc kubenswrapper[4909]: I1128 16:34:37.237884 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:37 crc kubenswrapper[4909]: I1128 16:34:37.432783 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:34:37 crc kubenswrapper[4909]: I1128 16:34:37.433274 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:34:37 crc kubenswrapper[4909]: I1128 16:34:37.917101 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2287c28b-bb92-4e34-9ca4-a31222838904" path="/var/lib/kubelet/pods/2287c28b-bb92-4e34-9ca4-a31222838904/volumes" Nov 28 16:34:38 crc kubenswrapper[4909]: I1128 16:34:38.014094 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bb4fc677f-r4b99" podUID="96ca616f-e86a-4274-8bef-da335a23e12c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.158:5353: i/o timeout" Nov 28 16:34:38 crc kubenswrapper[4909]: I1128 16:34:38.125243 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerStarted","Data":"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526"} Nov 28 16:34:38 crc kubenswrapper[4909]: I1128 16:34:38.125281 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerStarted","Data":"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169"} Nov 28 16:34:38 crc kubenswrapper[4909]: I1128 16:34:38.125292 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerStarted","Data":"0e24666fce4c2c696b564552100b875d8853b625090d7c92b12ee3208c5d0eab"} Nov 28 16:34:38 crc kubenswrapper[4909]: I1128 16:34:38.158891 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.158843825 podStartE2EDuration="2.158843825s" podCreationTimestamp="2025-11-28 16:34:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 16:34:38.149629007 +0000 UTC m=+1460.546313551" watchObservedRunningTime="2025-11-28 16:34:38.158843825 +0000 UTC m=+1460.555528349" Nov 28 16:34:39 crc kubenswrapper[4909]: I1128 16:34:39.044297 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:34:42 crc kubenswrapper[4909]: I1128 16:34:42.430531 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:34:42 crc kubenswrapper[4909]: I1128 16:34:42.431145 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:34:43 crc kubenswrapper[4909]: I1128 16:34:43.439026 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:43 crc kubenswrapper[4909]: I1128 16:34:43.439118 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:44 crc kubenswrapper[4909]: I1128 16:34:44.043796 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:34:44 crc kubenswrapper[4909]: I1128 16:34:44.069253 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:34:44 crc kubenswrapper[4909]: I1128 16:34:44.220804 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:34:44 crc kubenswrapper[4909]: I1128 16:34:44.484915 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 16:34:45 crc kubenswrapper[4909]: I1128 16:34:45.085998 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 16:34:46 crc kubenswrapper[4909]: I1128 16:34:46.795890 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:34:46 crc kubenswrapper[4909]: I1128 16:34:46.796235 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:34:47 crc kubenswrapper[4909]: I1128 16:34:47.879839 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:47 crc kubenswrapper[4909]: I1128 16:34:47.879839 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:34:48 crc kubenswrapper[4909]: I1128 16:34:48.927410 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:48 crc kubenswrapper[4909]: I1128 16:34:48.927882 4909 
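The startup-probe failures above (HTTP GETs against :8775 and :8774 timing out while the freshly recreated services warm up) resolve on their own once status flips to "started"; the kubelet only restarts a container after the failure threshold is exhausted. A hedged sketch of a probe of that shape using the corev1 types; the path and port echo the probe output above, while the timeout, period, and threshold values are assumptions, not read from this log:

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    // startupProbe sketches an HTTP startup probe like the one failing above
    // for nova-api (port 8774; nova-metadata uses 8775 over HTTPS). The
    // container is killed only after FailureThreshold consecutive failures,
    // i.e. up to FailureThreshold*PeriodSeconds of grace at startup.
    func startupProbe() *corev1.Probe {
        return &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{
                    Path: "/",                  // matches the "/" GETs in the probe output
                    Port: intstr.FromInt(8774), // nova-api port from the log
                },
            },
            TimeoutSeconds:   3,  // assumed
            PeriodSeconds:    5,  // assumed
            FailureThreshold: 12, // assumed
        }
    }
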
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.232541 4909 generic.go:334] "Generic (PLEG): container finished" podID="9075f51d-3271-438e-b4e4-cf6ccf65a6eb" containerID="b8c2b6121fa77c8742b8b9510e8ffcb85bed8675ecae8d86cce549aa45594ae3" exitCode=2
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.232583 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9075f51d-3271-438e-b4e4-cf6ccf65a6eb","Type":"ContainerDied","Data":"b8c2b6121fa77c8742b8b9510e8ffcb85bed8675ecae8d86cce549aa45594ae3"}
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.506769 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.641215 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhlht\" (UniqueName: \"kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht\") pod \"9075f51d-3271-438e-b4e4-cf6ccf65a6eb\" (UID: \"9075f51d-3271-438e-b4e4-cf6ccf65a6eb\") "
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.654567 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht" (OuterVolumeSpecName: "kube-api-access-qhlht") pod "9075f51d-3271-438e-b4e4-cf6ccf65a6eb" (UID: "9075f51d-3271-438e-b4e4-cf6ccf65a6eb"). InnerVolumeSpecName "kube-api-access-qhlht". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.743115 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhlht\" (UniqueName: \"kubernetes.io/projected/9075f51d-3271-438e-b4e4-cf6ccf65a6eb-kube-api-access-qhlht\") on node \"crc\" DevicePath \"\""
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.910764 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.910836 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.911373 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.911849 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:34:49 crc kubenswrapper[4909]: I1128 16:34:49.911907 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9" gracePeriod=600
Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.246044 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9" exitCode=0
Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.246128 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9"}
Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.246527 4909 scope.go:117] "RemoveContainer" containerID="938b3015525903a828287e46b6bae7ec7c3c38edf7df86757b71b4c9037a7ecd"
Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.249128 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9075f51d-3271-438e-b4e4-cf6ccf65a6eb","Type":"ContainerDied","Data":"b6999729045961873f3efe2c5e3d2c2962fa224c22914a18d74eeeb0be95a0d5"}
Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.249206 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.276174 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.280585 4909 scope.go:117] "RemoveContainer" containerID="b8c2b6121fa77c8742b8b9510e8ffcb85bed8675ecae8d86cce549aa45594ae3" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.286893 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.308561 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:50 crc kubenswrapper[4909]: E1128 16:34:50.309026 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9075f51d-3271-438e-b4e4-cf6ccf65a6eb" containerName="kube-state-metrics" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.309043 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9075f51d-3271-438e-b4e4-cf6ccf65a6eb" containerName="kube-state-metrics" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.309253 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9075f51d-3271-438e-b4e4-cf6ccf65a6eb" containerName="kube-state-metrics" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.309890 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.311593 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.311900 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.347955 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.354765 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgt79\" (UniqueName: \"kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.354838 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.354888 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.354947 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: 
\"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.456640 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgt79\" (UniqueName: \"kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.456782 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.456838 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.456899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.461520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.461878 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.463945 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.479026 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgt79\" (UniqueName: \"kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79\") pod \"kube-state-metrics-0\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.632004 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.761671 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.761932 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-central-agent" containerID="cri-o://02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc" gracePeriod=30 Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.762330 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="proxy-httpd" containerID="cri-o://8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8" gracePeriod=30 Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.762390 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="sg-core" containerID="cri-o://89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f" gracePeriod=30 Nov 28 16:34:50 crc kubenswrapper[4909]: I1128 16:34:50.762427 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-notification-agent" containerID="cri-o://0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a" gracePeriod=30 Nov 28 16:34:51 crc kubenswrapper[4909]: E1128 16:34:51.069705 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfd233c2_72be_482b_a194_b68da87eb105.slice/crio-conmon-8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfd233c2_72be_482b_a194_b68da87eb105.slice/crio-8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.097160 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261336 4909 generic.go:334] "Generic (PLEG): container finished" podID="bfd233c2-72be-482b-a194-b68da87eb105" containerID="8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8" exitCode=0 Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261562 4909 generic.go:334] "Generic (PLEG): container finished" podID="bfd233c2-72be-482b-a194-b68da87eb105" containerID="89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f" exitCode=2 Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261572 4909 generic.go:334] "Generic (PLEG): container finished" podID="bfd233c2-72be-482b-a194-b68da87eb105" containerID="02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc" exitCode=0 Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerDied","Data":"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8"} Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261637 
4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerDied","Data":"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f"} Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.261664 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerDied","Data":"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc"} Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.263073 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61","Type":"ContainerStarted","Data":"6a5f3255286ce870c817ddb1b48a301fe20009afca6ef58437cd668e092f9b4e"} Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.265537 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"} Nov 28 16:34:51 crc kubenswrapper[4909]: I1128 16:34:51.913261 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9075f51d-3271-438e-b4e4-cf6ccf65a6eb" path="/var/lib/kubelet/pods/9075f51d-3271-438e-b4e4-cf6ccf65a6eb/volumes" Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.278518 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61","Type":"ContainerStarted","Data":"995a1412416d800250460929b1d713faf90c65aaefa1d997100a9293ee18ff38"} Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.278880 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.297610 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.921630613 podStartE2EDuration="2.297587196s" podCreationTimestamp="2025-11-28 16:34:50 +0000 UTC" firstStartedPulling="2025-11-28 16:34:51.111526074 +0000 UTC m=+1473.508210598" lastFinishedPulling="2025-11-28 16:34:51.487482657 +0000 UTC m=+1473.884167181" observedRunningTime="2025-11-28 16:34:52.290989469 +0000 UTC m=+1474.687673993" watchObservedRunningTime="2025-11-28 16:34:52.297587196 +0000 UTC m=+1474.694271720" Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.552139 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.565852 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:34:52 crc kubenswrapper[4909]: I1128 16:34:52.589975 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:34:53 crc kubenswrapper[4909]: I1128 16:34:53.293747 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.306697 4909 generic.go:334] "Generic (PLEG): container finished" podID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" containerID="5c1abe607c19c7135c249b2516147df7ded4fcccb8eff97993dd74c4f627e143" exitCode=137 Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.307922 4909 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b94e81c7-38f5-47c0-b6f7-7624101ef17b","Type":"ContainerDied","Data":"5c1abe607c19c7135c249b2516147df7ded4fcccb8eff97993dd74c4f627e143"} Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.308011 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b94e81c7-38f5-47c0-b6f7-7624101ef17b","Type":"ContainerDied","Data":"c496250ab43f390a4e55378a43b247acee7b9004fcb35f4346bff5d92d079ceb"} Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.308022 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c496250ab43f390a4e55378a43b247acee7b9004fcb35f4346bff5d92d079ceb" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.336212 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.445428 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgzrr\" (UniqueName: \"kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr\") pod \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.445478 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data\") pod \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.445616 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle\") pod \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\" (UID: \"b94e81c7-38f5-47c0-b6f7-7624101ef17b\") " Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.451265 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr" (OuterVolumeSpecName: "kube-api-access-qgzrr") pod "b94e81c7-38f5-47c0-b6f7-7624101ef17b" (UID: "b94e81c7-38f5-47c0-b6f7-7624101ef17b"). InnerVolumeSpecName "kube-api-access-qgzrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.473899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b94e81c7-38f5-47c0-b6f7-7624101ef17b" (UID: "b94e81c7-38f5-47c0-b6f7-7624101ef17b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.480876 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data" (OuterVolumeSpecName: "config-data") pod "b94e81c7-38f5-47c0-b6f7-7624101ef17b" (UID: "b94e81c7-38f5-47c0-b6f7-7624101ef17b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.548148 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.548183 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgzrr\" (UniqueName: \"kubernetes.io/projected/b94e81c7-38f5-47c0-b6f7-7624101ef17b-kube-api-access-qgzrr\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:55 crc kubenswrapper[4909]: I1128 16:34:55.548193 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b94e81c7-38f5-47c0-b6f7-7624101ef17b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.030273 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.160849 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.160894 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.160944 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.161035 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxr2k\" (UniqueName: \"kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.161067 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.161136 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.161169 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd\") pod \"bfd233c2-72be-482b-a194-b68da87eb105\" (UID: \"bfd233c2-72be-482b-a194-b68da87eb105\") " Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 
16:34:56.161723 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.161874 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.166531 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts" (OuterVolumeSpecName: "scripts") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.166861 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k" (OuterVolumeSpecName: "kube-api-access-cxr2k") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "kube-api-access-cxr2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.192756 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.241150 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266712 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxr2k\" (UniqueName: \"kubernetes.io/projected/bfd233c2-72be-482b-a194-b68da87eb105-kube-api-access-cxr2k\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266750 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266787 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266802 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266813 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.266825 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bfd233c2-72be-482b-a194-b68da87eb105-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.269702 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data" (OuterVolumeSpecName: "config-data") pod "bfd233c2-72be-482b-a194-b68da87eb105" (UID: "bfd233c2-72be-482b-a194-b68da87eb105"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323298 4909 generic.go:334] "Generic (PLEG): container finished" podID="bfd233c2-72be-482b-a194-b68da87eb105" containerID="0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a" exitCode=0 Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323386 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerDied","Data":"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a"} Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323410 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323441 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bfd233c2-72be-482b-a194-b68da87eb105","Type":"ContainerDied","Data":"efbd0e447d3ba60537b057f0fcecf38cfff86dd3cddc453609da0881f3a4b5b5"} Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323466 4909 scope.go:117] "RemoveContainer" containerID="8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.323617 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.364645 4909 scope.go:117] "RemoveContainer" containerID="89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.375736 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd233c2-72be-482b-a194-b68da87eb105-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.412622 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.420964 4909 scope.go:117] "RemoveContainer" containerID="0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.469711 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.487896 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.496619 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.505726 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.506437 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="proxy-httpd" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.506556 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="proxy-httpd" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.506668 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.506757 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.506837 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-notification-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.506908 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-notification-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.507014 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="sg-core" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507097 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="sg-core" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.507173 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-central-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507269 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-central-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507576 4909 
memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-notification-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507676 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="ceilometer-central-agent" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507772 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="sg-core" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507861 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd233c2-72be-482b-a194-b68da87eb105" containerName="proxy-httpd" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.507932 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.508866 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.513844 4909 scope.go:117] "RemoveContainer" containerID="02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.514524 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.514525 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.514625 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.519150 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.521724 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.524100 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.526774 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.529212 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.530876 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.531130 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.573993 4909 scope.go:117] "RemoveContainer" containerID="8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.575837 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8\": container with ID starting with 8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8 not found: ID does not exist" containerID="8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.575895 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8"} err="failed to get container status \"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8\": rpc error: code = NotFound desc = could not find container \"8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8\": container with ID starting with 8bc3f6565ae2c6f85fd47e9cf2a340bbf95e6973525fadc39de6b532378ba0b8 not found: ID does not exist" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.575923 4909 scope.go:117] "RemoveContainer" containerID="89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.579792 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f\": container with ID starting with 89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f not found: ID does not exist" containerID="89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.579853 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f"} err="failed to get container status \"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f\": rpc error: code = NotFound desc = could not find container \"89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f\": container with ID starting with 89a939d5748b3d516cb33bd8aaf468f21514ffdd33b84f75867920b98d05018f not found: ID does not exist" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.579878 4909 scope.go:117] "RemoveContainer" containerID="0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.589843 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a\": container with ID starting with 0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a not found: ID does not exist" containerID="0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.589895 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a"} err="failed to get container status \"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a\": rpc error: code = NotFound desc = could not find container \"0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a\": container with ID starting with 0bf94391061ba390529c2e2964c9e83a8be207bca24a9d6ec6465491a60c316a not found: ID does not exist" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.589922 4909 scope.go:117] "RemoveContainer" containerID="02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.589988 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590084 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590112 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590253 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590315 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4f4z\" (UniqueName: \"kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590339 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590373 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590404 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590430 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590456 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc77c\" (UniqueName: \"kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590479 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590497 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.590539 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: E1128 16:34:56.591600 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc\": container with ID starting with 02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc not found: ID does not exist" containerID="02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.591748 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc"} err="failed to get container status \"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc\": rpc error: code = NotFound desc = could not find container \"02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc\": container with ID starting with 
02db1c4e3450806668eec7d06c775b02cd0475c718da433f81486b1beac23fcc not found: ID does not exist" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692114 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692179 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692234 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc77c\" (UniqueName: \"kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692254 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692268 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692294 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692336 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692358 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692374 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692431 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692460 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4f4z\" (UniqueName: \"kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.692478 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.693123 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.693293 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.696834 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.697113 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.697929 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.698702 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.699137 4909 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.699197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.708236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.708238 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.708487 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.710638 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4f4z\" (UniqueName: \"kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z\") pod \"ceilometer-0\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " pod="openstack/ceilometer-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.713413 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc77c\" (UniqueName: \"kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c\") pod \"nova-cell1-novncproxy-0\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.800645 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.801535 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.803028 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.805235 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.849872 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:56 crc kubenswrapper[4909]: I1128 16:34:56.860775 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.307826 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.353725 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bac06af4-bbe1-482a-8815-14a9cf2a1699","Type":"ContainerStarted","Data":"6369879d221d0173522f49293e5e5d88085918cc8e071a781bf1b66beb112e5f"} Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.356492 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.373392 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.389229 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.554924 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"] Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.557010 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.574880 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"] Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621331 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62hxf\" (UniqueName: \"kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621388 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621423 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621473 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621528 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" 
Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.621578 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723096 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723231 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62hxf\" (UniqueName: \"kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723264 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723297 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723353 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.723410 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.724213 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.724392 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.724953 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.724985 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.725071 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.745561 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62hxf\" (UniqueName: \"kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf\") pod \"dnsmasq-dns-5c7b6c5df9-gbql4\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.914336 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b94e81c7-38f5-47c0-b6f7-7624101ef17b" path="/var/lib/kubelet/pods/b94e81c7-38f5-47c0-b6f7-7624101ef17b/volumes" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.914502 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:34:57 crc kubenswrapper[4909]: I1128 16:34:57.928369 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfd233c2-72be-482b-a194-b68da87eb105" path="/var/lib/kubelet/pods/bfd233c2-72be-482b-a194-b68da87eb105/volumes" Nov 28 16:34:58 crc kubenswrapper[4909]: I1128 16:34:58.366757 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bac06af4-bbe1-482a-8815-14a9cf2a1699","Type":"ContainerStarted","Data":"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"} Nov 28 16:34:58 crc kubenswrapper[4909]: I1128 16:34:58.372647 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerStarted","Data":"cb48c388822147c5cb47f0075953c1709612ad0e000c56a371cbf869928de7af"} Nov 28 16:34:58 crc kubenswrapper[4909]: I1128 16:34:58.397716 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.39769316 podStartE2EDuration="2.39769316s" podCreationTimestamp="2025-11-28 16:34:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:58.3869276 +0000 UTC m=+1480.783612134" watchObservedRunningTime="2025-11-28 16:34:58.39769316 +0000 UTC m=+1480.794377684" Nov 28 16:34:58 crc kubenswrapper[4909]: W1128 16:34:58.462618 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20c5e3ee_fe01_49c7_96fd_153897da815e.slice/crio-f7812f891f5cd5938f3d04d22c5106203e9fdc5d055f01111bdcf8abbac0bec3 WatchSource:0}: Error finding container f7812f891f5cd5938f3d04d22c5106203e9fdc5d055f01111bdcf8abbac0bec3: Status 404 returned error can't find the container with id f7812f891f5cd5938f3d04d22c5106203e9fdc5d055f01111bdcf8abbac0bec3 Nov 28 16:34:58 crc kubenswrapper[4909]: I1128 16:34:58.499602 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"] Nov 28 16:34:59 crc kubenswrapper[4909]: I1128 16:34:59.383254 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerStarted","Data":"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228"} Nov 28 16:34:59 crc kubenswrapper[4909]: I1128 16:34:59.384478 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerStarted","Data":"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b"} Nov 28 16:34:59 crc kubenswrapper[4909]: I1128 16:34:59.384903 4909 generic.go:334] "Generic (PLEG): container finished" podID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerID="4c75e9e01c7896ba8550fe35a51a4e7f8f2739fa601933d4cd40f37cdc45075b" exitCode=0 Nov 28 16:34:59 crc kubenswrapper[4909]: I1128 16:34:59.384958 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" event={"ID":"20c5e3ee-fe01-49c7-96fd-153897da815e","Type":"ContainerDied","Data":"4c75e9e01c7896ba8550fe35a51a4e7f8f2739fa601933d4cd40f37cdc45075b"} Nov 28 16:34:59 crc kubenswrapper[4909]: I1128 16:34:59.385027 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" 
event={"ID":"20c5e3ee-fe01-49c7-96fd-153897da815e","Type":"ContainerStarted","Data":"f7812f891f5cd5938f3d04d22c5106203e9fdc5d055f01111bdcf8abbac0bec3"} Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.399156 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerStarted","Data":"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c"} Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.404191 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" event={"ID":"20c5e3ee-fe01-49c7-96fd-153897da815e","Type":"ContainerStarted","Data":"a20717125aece580e07ee96759127e5cfd62fd2d73b26608a64fc09ab56c1ffa"} Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.405325 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.426389 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" podStartSLOduration=3.426365494 podStartE2EDuration="3.426365494s" podCreationTimestamp="2025-11-28 16:34:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:00.421648237 +0000 UTC m=+1482.818332761" watchObservedRunningTime="2025-11-28 16:35:00.426365494 +0000 UTC m=+1482.823050028" Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.541294 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.541534 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-log" containerID="cri-o://267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169" gracePeriod=30 Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.541701 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-api" containerID="cri-o://9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526" gracePeriod=30 Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.640954 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 16:35:00 crc kubenswrapper[4909]: I1128 16:35:00.782801 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:01 crc kubenswrapper[4909]: I1128 16:35:01.436134 4909 generic.go:334] "Generic (PLEG): container finished" podID="e00fa182-8ee5-4760-bacd-79db454c12db" containerID="267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169" exitCode=143 Nov 28 16:35:01 crc kubenswrapper[4909]: I1128 16:35:01.436249 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerDied","Data":"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169"} Nov 28 16:35:01 crc kubenswrapper[4909]: I1128 16:35:01.850446 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449350 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerStarted","Data":"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115"} Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449349 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-central-agent" containerID="cri-o://9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b" gracePeriod=30 Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449411 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449492 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="proxy-httpd" containerID="cri-o://433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115" gracePeriod=30 Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449548 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="sg-core" containerID="cri-o://bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c" gracePeriod=30 Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.449578 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-notification-agent" containerID="cri-o://93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228" gracePeriod=30 Nov 28 16:35:02 crc kubenswrapper[4909]: I1128 16:35:02.470746 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.652046859 podStartE2EDuration="6.470729251s" podCreationTimestamp="2025-11-28 16:34:56 +0000 UTC" firstStartedPulling="2025-11-28 16:34:57.408239275 +0000 UTC m=+1479.804923819" lastFinishedPulling="2025-11-28 16:35:01.226921687 +0000 UTC m=+1483.623606211" observedRunningTime="2025-11-28 16:35:02.470137905 +0000 UTC m=+1484.866822449" watchObservedRunningTime="2025-11-28 16:35:02.470729251 +0000 UTC m=+1484.867413775" Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460385 4909 generic.go:334] "Generic (PLEG): container finished" podID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerID="433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115" exitCode=0 Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460437 4909 generic.go:334] "Generic (PLEG): container finished" podID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerID="bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c" exitCode=2 Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460447 4909 generic.go:334] "Generic (PLEG): container finished" podID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerID="93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228" exitCode=0 Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460471 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerDied","Data":"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115"} Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460538 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerDied","Data":"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c"} Nov 28 16:35:03 crc kubenswrapper[4909]: I1128 16:35:03.460558 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerDied","Data":"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228"} Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.199098 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.352324 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97qcv\" (UniqueName: \"kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv\") pod \"e00fa182-8ee5-4760-bacd-79db454c12db\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.352793 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs\") pod \"e00fa182-8ee5-4760-bacd-79db454c12db\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.352846 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data\") pod \"e00fa182-8ee5-4760-bacd-79db454c12db\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.352941 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle\") pod \"e00fa182-8ee5-4760-bacd-79db454c12db\" (UID: \"e00fa182-8ee5-4760-bacd-79db454c12db\") " Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.355550 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs" (OuterVolumeSpecName: "logs") pod "e00fa182-8ee5-4760-bacd-79db454c12db" (UID: "e00fa182-8ee5-4760-bacd-79db454c12db"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.374060 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv" (OuterVolumeSpecName: "kube-api-access-97qcv") pod "e00fa182-8ee5-4760-bacd-79db454c12db" (UID: "e00fa182-8ee5-4760-bacd-79db454c12db"). InnerVolumeSpecName "kube-api-access-97qcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.392124 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e00fa182-8ee5-4760-bacd-79db454c12db" (UID: "e00fa182-8ee5-4760-bacd-79db454c12db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.408249 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data" (OuterVolumeSpecName: "config-data") pod "e00fa182-8ee5-4760-bacd-79db454c12db" (UID: "e00fa182-8ee5-4760-bacd-79db454c12db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.455403 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97qcv\" (UniqueName: \"kubernetes.io/projected/e00fa182-8ee5-4760-bacd-79db454c12db-kube-api-access-97qcv\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.455431 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e00fa182-8ee5-4760-bacd-79db454c12db-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.455442 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.455451 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e00fa182-8ee5-4760-bacd-79db454c12db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.475571 4909 generic.go:334] "Generic (PLEG): container finished" podID="e00fa182-8ee5-4760-bacd-79db454c12db" containerID="9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526" exitCode=0 Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.475669 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerDied","Data":"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526"} Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.475702 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e00fa182-8ee5-4760-bacd-79db454c12db","Type":"ContainerDied","Data":"0e24666fce4c2c696b564552100b875d8853b625090d7c92b12ee3208c5d0eab"} Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.475722 4909 scope.go:117] "RemoveContainer" containerID="9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.475876 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.518911 4909 scope.go:117] "RemoveContainer" containerID="267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.532490 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.532556 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.551850 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:04 crc kubenswrapper[4909]: E1128 16:35:04.553872 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-log" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.553915 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-log" Nov 28 16:35:04 crc kubenswrapper[4909]: E1128 16:35:04.553941 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-api" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.553947 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-api" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.554190 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-api" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.554248 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" containerName="nova-api-log" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.557466 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.560032 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.560134 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.560430 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.580399 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.583687 4909 scope.go:117] "RemoveContainer" containerID="9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526" Nov 28 16:35:04 crc kubenswrapper[4909]: E1128 16:35:04.584750 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526\": container with ID starting with 9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526 not found: ID does not exist" containerID="9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.584792 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526"} err="failed to get container status \"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526\": rpc error: code = NotFound desc = could not find container \"9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526\": container with ID starting with 9d3d4f37db4abbc9d7c0cbfac5e417b8bc6478e6be99028f9aa59cf725a2d526 not found: ID does not exist" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.584820 4909 scope.go:117] "RemoveContainer" containerID="267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169" Nov 28 16:35:04 crc kubenswrapper[4909]: E1128 16:35:04.586296 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169\": container with ID starting with 267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169 not found: ID does not exist" containerID="267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.586340 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169"} err="failed to get container status \"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169\": rpc error: code = NotFound desc = could not find container \"267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169\": container with ID starting with 267b16dc0ea3c147a2f9c1f952c921ebc96282c7f3ba7da4511c95a28002f169 not found: ID does not exist" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760602 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 
16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760671 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760717 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760779 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6nlv\" (UniqueName: \"kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760824 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.760905 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.862505 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.862911 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.863041 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.863064 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.863097 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.863106 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.863131 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6nlv\" (UniqueName: \"kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.879740 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.879761 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.883031 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.883043 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6nlv\" (UniqueName: \"kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.888454 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " pod="openstack/nova-api-0" Nov 28 16:35:04 crc kubenswrapper[4909]: I1128 16:35:04.898269 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.129837 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.284862 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.285358 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.285437 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.285713 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.285885 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.286217 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.286803 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.286880 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.286940 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4f4z\" (UniqueName: \"kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z\") pod \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\" (UID: \"c4163067-0e3f-4cab-9b27-6e7fccc3045e\") " Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.287457 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.287991 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.290342 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z" (OuterVolumeSpecName: "kube-api-access-g4f4z") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "kube-api-access-g4f4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.293268 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts" (OuterVolumeSpecName: "scripts") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.316133 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.360457 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:05 crc kubenswrapper[4909]: W1128 16:35:05.365546 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38e07fb0_7023_49ad_9e0a_e0ee7f565ef4.slice/crio-079407fbe7f56713f49fa96a506e8dd01a74cc28ee1b68d6b34075081b559e0f WatchSource:0}: Error finding container 079407fbe7f56713f49fa96a506e8dd01a74cc28ee1b68d6b34075081b559e0f: Status 404 returned error can't find the container with id 079407fbe7f56713f49fa96a506e8dd01a74cc28ee1b68d6b34075081b559e0f Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.381774 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.382203 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: "c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.389956 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.389989 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4f4z\" (UniqueName: \"kubernetes.io/projected/c4163067-0e3f-4cab-9b27-6e7fccc3045e-kube-api-access-g4f4z\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.390005 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.390016 4909 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.390031 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.390075 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4163067-0e3f-4cab-9b27-6e7fccc3045e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.396337 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data" (OuterVolumeSpecName: "config-data") pod "c4163067-0e3f-4cab-9b27-6e7fccc3045e" (UID: 
"c4163067-0e3f-4cab-9b27-6e7fccc3045e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.486302 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerStarted","Data":"079407fbe7f56713f49fa96a506e8dd01a74cc28ee1b68d6b34075081b559e0f"} Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.491537 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4163067-0e3f-4cab-9b27-6e7fccc3045e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.491636 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.491649 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerDied","Data":"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b"} Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.491747 4909 scope.go:117] "RemoveContainer" containerID="433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.491537 4909 generic.go:334] "Generic (PLEG): container finished" podID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerID="9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b" exitCode=0 Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.493587 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4163067-0e3f-4cab-9b27-6e7fccc3045e","Type":"ContainerDied","Data":"cb48c388822147c5cb47f0075953c1709612ad0e000c56a371cbf869928de7af"} Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.516556 4909 scope.go:117] "RemoveContainer" containerID="bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.538695 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.538911 4909 scope.go:117] "RemoveContainer" containerID="93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.554970 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.569980 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.570677 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="proxy-httpd" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.570699 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="proxy-httpd" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.570723 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-central-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.570734 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-central-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.570759 
4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-notification-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.570787 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-notification-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.570802 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="sg-core" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.570808 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="sg-core" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.571053 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-notification-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.571067 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="ceilometer-central-agent" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.571087 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="sg-core" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.571099 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" containerName="proxy-httpd" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.574988 4909 scope.go:117] "RemoveContainer" containerID="9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.585347 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.588431 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.588826 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.589169 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.593952 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.594218 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.594637 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.594711 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjm7s\" (UniqueName: \"kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.594879 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.594986 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.595094 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.595164 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 
28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.604349 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.627016 4909 scope.go:117] "RemoveContainer" containerID="433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.627647 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115\": container with ID starting with 433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115 not found: ID does not exist" containerID="433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.627862 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115"} err="failed to get container status \"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115\": rpc error: code = NotFound desc = could not find container \"433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115\": container with ID starting with 433f8784e51999ac38e227f4b81bc2f5b98895c12cf00fa1a569a446bf98a115 not found: ID does not exist" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.627894 4909 scope.go:117] "RemoveContainer" containerID="bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.628349 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c\": container with ID starting with bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c not found: ID does not exist" containerID="bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.628413 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c"} err="failed to get container status \"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c\": rpc error: code = NotFound desc = could not find container \"bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c\": container with ID starting with bf7e7fbcc2e9d708a577956c1de7bb144f29500e8eb3d1470d8c1d579b71213c not found: ID does not exist" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.628456 4909 scope.go:117] "RemoveContainer" containerID="93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.628885 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228\": container with ID starting with 93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228 not found: ID does not exist" containerID="93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.628917 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228"} err="failed to get container status 
\"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228\": rpc error: code = NotFound desc = could not find container \"93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228\": container with ID starting with 93a640f058f780cc4f4794ce69b9f50dcac257ebdd705d5948e1de800375f228 not found: ID does not exist" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.628941 4909 scope.go:117] "RemoveContainer" containerID="9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b" Nov 28 16:35:05 crc kubenswrapper[4909]: E1128 16:35:05.629232 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b\": container with ID starting with 9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b not found: ID does not exist" containerID="9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.629271 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b"} err="failed to get container status \"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b\": rpc error: code = NotFound desc = could not find container \"9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b\": container with ID starting with 9497b0e101862c9212a0c8ca1797b369e47ced803eb3aa3847d0148c0405ef6b not found: ID does not exist" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696304 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696410 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696460 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696491 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696557 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696630 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696686 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjm7s\" (UniqueName: \"kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.696732 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.697462 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.697738 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.701980 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.702363 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.702389 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.702510 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.704000 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.720427 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjm7s\" (UniqueName: 
\"kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s\") pod \"ceilometer-0\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.909649 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.912823 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4163067-0e3f-4cab-9b27-6e7fccc3045e" path="/var/lib/kubelet/pods/c4163067-0e3f-4cab-9b27-6e7fccc3045e/volumes" Nov 28 16:35:05 crc kubenswrapper[4909]: I1128 16:35:05.913794 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e00fa182-8ee5-4760-bacd-79db454c12db" path="/var/lib/kubelet/pods/e00fa182-8ee5-4760-bacd-79db454c12db/volumes" Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.376225 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:06 crc kubenswrapper[4909]: W1128 16:35:06.393805 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44195e2b_7f1d_4542_8948_93a818071fd2.slice/crio-65a39c69d94ebd3682bf32718aecafa565b14fc87de765b90fc2a780dd455ff1 WatchSource:0}: Error finding container 65a39c69d94ebd3682bf32718aecafa565b14fc87de765b90fc2a780dd455ff1: Status 404 returned error can't find the container with id 65a39c69d94ebd3682bf32718aecafa565b14fc87de765b90fc2a780dd455ff1 Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.507535 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerStarted","Data":"b32756291bf15b5830af5975d69130c26a296034d720c73a8689c03ff65ea3dd"} Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.507587 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerStarted","Data":"66a4f9480caa4bf88a2395446821189a734d4ac714eafbf43f5d8cfef52a1d61"} Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.511572 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerStarted","Data":"65a39c69d94ebd3682bf32718aecafa565b14fc87de765b90fc2a780dd455ff1"} Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.531369 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.531349579 podStartE2EDuration="2.531349579s" podCreationTimestamp="2025-11-28 16:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:06.530332111 +0000 UTC m=+1488.927016625" watchObservedRunningTime="2025-11-28 16:35:06.531349579 +0000 UTC m=+1488.928034093" Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.850247 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:06 crc kubenswrapper[4909]: I1128 16:35:06.871020 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.523943 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerStarted","Data":"100c610b01cd8cbae563a7661d097874303002f0d958013b0241d5cf74e9cfd2"} Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.541412 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.721768 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-hqb8m"] Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.723535 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.728075 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.734231 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-hqb8m"] Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.738583 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.742800 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.742853 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.743438 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lszjn\" (UniqueName: \"kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.743703 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.845396 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.845513 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " 
pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.845536 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.845576 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lszjn\" (UniqueName: \"kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.860044 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.861624 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.872319 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.882080 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lszjn\" (UniqueName: \"kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn\") pod \"nova-cell1-cell-mapping-hqb8m\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:07 crc kubenswrapper[4909]: I1128 16:35:07.923197 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.041395 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.042689 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="dnsmasq-dns" containerID="cri-o://0d48820f5d21bc197eb801dacd5dc3118d806007250e25469dbf27b1b1789b47" gracePeriod=10 Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.043339 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.534299 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerStarted","Data":"243efd87260faad6273959bee1e446c951a950b25ee3ad8a4d986ebc6dff73bb"} Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.537291 4909 generic.go:334] "Generic (PLEG): container finished" podID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerID="0d48820f5d21bc197eb801dacd5dc3118d806007250e25469dbf27b1b1789b47" exitCode=0 Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.537347 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" event={"ID":"1c23d3ad-6031-41a7-89cd-f9b863351cc1","Type":"ContainerDied","Data":"0d48820f5d21bc197eb801dacd5dc3118d806007250e25469dbf27b1b1789b47"} Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.537386 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" event={"ID":"1c23d3ad-6031-41a7-89cd-f9b863351cc1","Type":"ContainerDied","Data":"de9c72cc855c99206312d060643947b52b77e5ea882c520360218724f6568a84"} Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.537411 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de9c72cc855c99206312d060643947b52b77e5ea882c520360218724f6568a84" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.601115 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665426 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665465 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56mff\" (UniqueName: \"kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665543 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665592 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665620 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.665722 4909 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config\") pod \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\" (UID: \"1c23d3ad-6031-41a7-89cd-f9b863351cc1\") " Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.673832 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff" (OuterVolumeSpecName: "kube-api-access-56mff") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "kube-api-access-56mff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: W1128 16:35:08.693876 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5b41266_6099_4bf7_a26b_4a98b01aa9b6.slice/crio-2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb WatchSource:0}: Error finding container 2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb: Status 404 returned error can't find the container with id 2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.697312 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-hqb8m"] Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.728385 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.732244 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.735962 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.745766 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config" (OuterVolumeSpecName: "config") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.749991 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1c23d3ad-6031-41a7-89cd-f9b863351cc1" (UID: "1c23d3ad-6031-41a7-89cd-f9b863351cc1"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767536 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767579 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767592 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767604 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56mff\" (UniqueName: \"kubernetes.io/projected/1c23d3ad-6031-41a7-89cd-f9b863351cc1-kube-api-access-56mff\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767620 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:08 crc kubenswrapper[4909]: I1128 16:35:08.767634 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c23d3ad-6031-41a7-89cd-f9b863351cc1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.550257 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerStarted","Data":"66d5e1af763ac9fbd2d79ed29f67762e31a80531d747a095842ae46df8e3741e"} Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.552139 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-rmkzw" Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.552195 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hqb8m" event={"ID":"a5b41266-6099-4bf7-a26b-4a98b01aa9b6","Type":"ContainerStarted","Data":"49028e9be7659ad35e5108e0a2c128957971bb8f2381bd1914434307ad430e5b"} Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.552247 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hqb8m" event={"ID":"a5b41266-6099-4bf7-a26b-4a98b01aa9b6","Type":"ContainerStarted","Data":"2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb"} Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.575193 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-hqb8m" podStartSLOduration=2.5751722839999998 podStartE2EDuration="2.575172284s" podCreationTimestamp="2025-11-28 16:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:09.57205867 +0000 UTC m=+1491.968743204" watchObservedRunningTime="2025-11-28 16:35:09.575172284 +0000 UTC m=+1491.971856808" Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.595358 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.603278 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-rmkzw"] Nov 28 16:35:09 crc kubenswrapper[4909]: I1128 16:35:09.913089 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" path="/var/lib/kubelet/pods/1c23d3ad-6031-41a7-89cd-f9b863351cc1/volumes" Nov 28 16:35:11 crc kubenswrapper[4909]: I1128 16:35:11.595996 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerStarted","Data":"2eb951aa3c283ce1fd52ad843a7234b8357e48ad0ed3f9fa1b30578a1af9fa7a"} Nov 28 16:35:11 crc kubenswrapper[4909]: I1128 16:35:11.597281 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:35:11 crc kubenswrapper[4909]: I1128 16:35:11.632290 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.598901761 podStartE2EDuration="6.632270779s" podCreationTimestamp="2025-11-28 16:35:05 +0000 UTC" firstStartedPulling="2025-11-28 16:35:06.40326445 +0000 UTC m=+1488.799948974" lastFinishedPulling="2025-11-28 16:35:10.436633468 +0000 UTC m=+1492.833317992" observedRunningTime="2025-11-28 16:35:11.625686442 +0000 UTC m=+1494.022370966" watchObservedRunningTime="2025-11-28 16:35:11.632270779 +0000 UTC m=+1494.028955313" Nov 28 16:35:14 crc kubenswrapper[4909]: I1128 16:35:14.623401 4909 generic.go:334] "Generic (PLEG): container finished" podID="a5b41266-6099-4bf7-a26b-4a98b01aa9b6" containerID="49028e9be7659ad35e5108e0a2c128957971bb8f2381bd1914434307ad430e5b" exitCode=0 Nov 28 16:35:14 crc kubenswrapper[4909]: I1128 16:35:14.623484 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hqb8m" event={"ID":"a5b41266-6099-4bf7-a26b-4a98b01aa9b6","Type":"ContainerDied","Data":"49028e9be7659ad35e5108e0a2c128957971bb8f2381bd1914434307ad430e5b"} Nov 28 16:35:14 crc kubenswrapper[4909]: I1128 
16:35:14.898994 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:14 crc kubenswrapper[4909]: I1128 16:35:14.899063 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:15 crc kubenswrapper[4909]: I1128 16:35:15.913356 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.196:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:15 crc kubenswrapper[4909]: I1128 16:35:15.913382 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.196:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.051779 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.110153 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts\") pod \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.110258 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle\") pod \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.110301 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lszjn\" (UniqueName: \"kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn\") pod \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.110378 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data\") pod \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\" (UID: \"a5b41266-6099-4bf7-a26b-4a98b01aa9b6\") " Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.130334 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts" (OuterVolumeSpecName: "scripts") pod "a5b41266-6099-4bf7-a26b-4a98b01aa9b6" (UID: "a5b41266-6099-4bf7-a26b-4a98b01aa9b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.139685 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn" (OuterVolumeSpecName: "kube-api-access-lszjn") pod "a5b41266-6099-4bf7-a26b-4a98b01aa9b6" (UID: "a5b41266-6099-4bf7-a26b-4a98b01aa9b6"). InnerVolumeSpecName "kube-api-access-lszjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.150855 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5b41266-6099-4bf7-a26b-4a98b01aa9b6" (UID: "a5b41266-6099-4bf7-a26b-4a98b01aa9b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.183216 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data" (OuterVolumeSpecName: "config-data") pod "a5b41266-6099-4bf7-a26b-4a98b01aa9b6" (UID: "a5b41266-6099-4bf7-a26b-4a98b01aa9b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.216039 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.216078 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lszjn\" (UniqueName: \"kubernetes.io/projected/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-kube-api-access-lszjn\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.216093 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.216103 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b41266-6099-4bf7-a26b-4a98b01aa9b6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.655120 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hqb8m" event={"ID":"a5b41266-6099-4bf7-a26b-4a98b01aa9b6","Type":"ContainerDied","Data":"2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb"} Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.655176 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a268b374817314eeff480750577d53208c582d298aa233ecb6a8efedf201ccb" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.655255 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hqb8m" Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.839841 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.840111 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-log" containerID="cri-o://66a4f9480caa4bf88a2395446821189a734d4ac714eafbf43f5d8cfef52a1d61" gracePeriod=30 Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.840204 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-api" containerID="cri-o://b32756291bf15b5830af5975d69130c26a296034d720c73a8689c03ff65ea3dd" gracePeriod=30 Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.867100 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.867375 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerName="nova-scheduler-scheduler" containerID="cri-o://48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" gracePeriod=30 Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.900877 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.901178 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" containerID="cri-o://3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072" gracePeriod=30 Nov 28 16:35:16 crc kubenswrapper[4909]: I1128 16:35:16.901782 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" containerID="cri-o://057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4" gracePeriod=30 Nov 28 16:35:17 crc kubenswrapper[4909]: I1128 16:35:17.671802 4909 generic.go:334] "Generic (PLEG): container finished" podID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerID="66a4f9480caa4bf88a2395446821189a734d4ac714eafbf43f5d8cfef52a1d61" exitCode=143 Nov 28 16:35:17 crc kubenswrapper[4909]: I1128 16:35:17.671862 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerDied","Data":"66a4f9480caa4bf88a2395446821189a734d4ac714eafbf43f5d8cfef52a1d61"} Nov 28 16:35:17 crc kubenswrapper[4909]: I1128 16:35:17.674993 4909 generic.go:334] "Generic (PLEG): container finished" podID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerID="3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072" exitCode=143 Nov 28 16:35:17 crc kubenswrapper[4909]: I1128 16:35:17.675021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerDied","Data":"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072"} Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.043681 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container 
is not created or running: checking if PID of 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 is running failed: container process not found" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.044429 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 is running failed: container process not found" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.047899 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 is running failed: container process not found" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.047937 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.187071 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.273215 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle\") pod \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.273362 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb96h\" (UniqueName: \"kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h\") pod \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.273479 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data\") pod \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\" (UID: \"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c\") " Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.282043 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h" (OuterVolumeSpecName: "kube-api-access-nb96h") pod "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" (UID: "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c"). InnerVolumeSpecName "kube-api-access-nb96h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.299614 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data" (OuterVolumeSpecName: "config-data") pod "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" (UID: "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.302801 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" (UID: "3611cfd4-bf6b-41f0-8643-f8cb7f69a68c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.375945 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb96h\" (UniqueName: \"kubernetes.io/projected/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-kube-api-access-nb96h\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.375985 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.375999 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.694239 4909 generic.go:334] "Generic (PLEG): container finished" podID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" exitCode=0 Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.694279 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c","Type":"ContainerDied","Data":"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529"} Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.694314 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3611cfd4-bf6b-41f0-8643-f8cb7f69a68c","Type":"ContainerDied","Data":"1c6f7892d95d67c8bb8028cb7786323eda23e3100fe033ab99e22782d68698da"} Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.694331 4909 scope.go:117] "RemoveContainer" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.694375 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.714369 4909 scope.go:117] "RemoveContainer" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.714787 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529\": container with ID starting with 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 not found: ID does not exist" containerID="48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.714817 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529"} err="failed to get container status \"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529\": rpc error: code = NotFound desc = could not find container \"48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529\": container with ID starting with 48d4eb596206cf59e62c9d2b98ee171960a0e66612d54c0e9d38b1a3f77d5529 not found: ID does not exist" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.735301 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.748668 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.758976 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.759499 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.759583 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.759678 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b41266-6099-4bf7-a26b-4a98b01aa9b6" containerName="nova-manage" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.759749 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b41266-6099-4bf7-a26b-4a98b01aa9b6" containerName="nova-manage" Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.759820 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.759896 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4909]: E1128 16:35:19.759967 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="init" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.760020 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="init" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.760243 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b41266-6099-4bf7-a26b-4a98b01aa9b6" containerName="nova-manage" Nov 28 16:35:19 crc 
kubenswrapper[4909]: I1128 16:35:19.760328 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.760391 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c23d3ad-6031-41a7-89cd-f9b863351cc1" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.761077 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.767324 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.788420 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.888430 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.888585 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqsll\" (UniqueName: \"kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.888814 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.913082 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3611cfd4-bf6b-41f0-8643-f8cb7f69a68c" path="/var/lib/kubelet/pods/3611cfd4-bf6b-41f0-8643-f8cb7f69a68c/volumes" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.990467 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqsll\" (UniqueName: \"kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.990916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.991091 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4909]: I1128 16:35:19.995090 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.007402 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.018290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqsll\" (UniqueName: \"kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll\") pod \"nova-scheduler-0\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.079073 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.148264 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 10.217.0.2:54046->10.217.0.188:8775: read: connection reset by peer" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.148320 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 10.217.0.2:54048->10.217.0.188:8775: read: connection reset by peer" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.531923 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.571576 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.601924 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7r249\" (UniqueName: \"kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249\") pod \"b0692d4c-3e74-48d9-8066-d3c22d037012\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.602075 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle\") pod \"b0692d4c-3e74-48d9-8066-d3c22d037012\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.602102 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data\") pod \"b0692d4c-3e74-48d9-8066-d3c22d037012\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.602215 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs\") pod \"b0692d4c-3e74-48d9-8066-d3c22d037012\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.602243 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs\") pod \"b0692d4c-3e74-48d9-8066-d3c22d037012\" (UID: \"b0692d4c-3e74-48d9-8066-d3c22d037012\") " Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.604387 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs" (OuterVolumeSpecName: "logs") pod "b0692d4c-3e74-48d9-8066-d3c22d037012" (UID: "b0692d4c-3e74-48d9-8066-d3c22d037012"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.608538 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249" (OuterVolumeSpecName: "kube-api-access-7r249") pod "b0692d4c-3e74-48d9-8066-d3c22d037012" (UID: "b0692d4c-3e74-48d9-8066-d3c22d037012"). InnerVolumeSpecName "kube-api-access-7r249". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.632101 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data" (OuterVolumeSpecName: "config-data") pod "b0692d4c-3e74-48d9-8066-d3c22d037012" (UID: "b0692d4c-3e74-48d9-8066-d3c22d037012"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.640434 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0692d4c-3e74-48d9-8066-d3c22d037012" (UID: "b0692d4c-3e74-48d9-8066-d3c22d037012"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.703916 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.704193 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.704279 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0692d4c-3e74-48d9-8066-d3c22d037012-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.704370 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7r249\" (UniqueName: \"kubernetes.io/projected/b0692d4c-3e74-48d9-8066-d3c22d037012-kube-api-access-7r249\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.704428 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8b95945-6169-4e44-861a-f4abd48a7161","Type":"ContainerStarted","Data":"a5be97e66ba6d28d38e21c6092e9c3c674ed31badc5bd35ffc952b4c55772330"} Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.706201 4909 generic.go:334] "Generic (PLEG): container finished" podID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerID="057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4" exitCode=0 Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.706383 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerDied","Data":"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4"} Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.706507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b0692d4c-3e74-48d9-8066-d3c22d037012","Type":"ContainerDied","Data":"850453a2da71db65946c9c34844bc0abb3160e2bbec7fa09176c17adc7d090d3"} Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.706604 4909 scope.go:117] "RemoveContainer" containerID="057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.706859 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.761128 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b0692d4c-3e74-48d9-8066-d3c22d037012" (UID: "b0692d4c-3e74-48d9-8066-d3c22d037012"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.785291 4909 scope.go:117] "RemoveContainer" containerID="3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.807364 4909 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0692d4c-3e74-48d9-8066-d3c22d037012-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.812887 4909 scope.go:117] "RemoveContainer" containerID="057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4" Nov 28 16:35:20 crc kubenswrapper[4909]: E1128 16:35:20.814848 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4\": container with ID starting with 057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4 not found: ID does not exist" containerID="057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.814900 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4"} err="failed to get container status \"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4\": rpc error: code = NotFound desc = could not find container \"057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4\": container with ID starting with 057ee363c7521e970233c04fb4162ace8577a08d4267b8ed194cc078ec87f1d4 not found: ID does not exist" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.814926 4909 scope.go:117] "RemoveContainer" containerID="3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072" Nov 28 16:35:20 crc kubenswrapper[4909]: E1128 16:35:20.815351 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072\": container with ID starting with 3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072 not found: ID does not exist" containerID="3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072" Nov 28 16:35:20 crc kubenswrapper[4909]: I1128 16:35:20.815398 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072"} err="failed to get container status \"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072\": rpc error: code = NotFound desc = could not find container \"3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072\": container with ID starting with 3629aeffd7fb127c4a8a700fa819c08da4bf2eabf9807520f842192129e82072 not found: ID does not exist" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.081625 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.091227 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.109004 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:21 crc kubenswrapper[4909]: E1128 16:35:21.109482 4909 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.109502 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" Nov 28 16:35:21 crc kubenswrapper[4909]: E1128 16:35:21.109524 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.109532 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.109787 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-metadata" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.109815 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" containerName="nova-metadata-log" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.114977 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.118746 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.118811 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.123184 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.214322 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.214382 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.214480 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.214569 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbp46\" (UniqueName: \"kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.214608 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317227 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317284 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317338 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317383 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbp46\" (UniqueName: \"kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317415 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.317789 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.323385 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.325175 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.325806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.335622 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bbp46\" (UniqueName: \"kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46\") pod \"nova-metadata-0\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.442594 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.730931 4909 generic.go:334] "Generic (PLEG): container finished" podID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerID="b32756291bf15b5830af5975d69130c26a296034d720c73a8689c03ff65ea3dd" exitCode=0 Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.731015 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerDied","Data":"b32756291bf15b5830af5975d69130c26a296034d720c73a8689c03ff65ea3dd"} Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.734555 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8b95945-6169-4e44-861a-f4abd48a7161","Type":"ContainerStarted","Data":"bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377"} Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.766945 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.766982 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.766962907 podStartE2EDuration="2.766962907s" podCreationTimestamp="2025-11-28 16:35:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:21.757431971 +0000 UTC m=+1504.154116495" watchObservedRunningTime="2025-11-28 16:35:21.766962907 +0000 UTC m=+1504.163647431" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.824839 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.824955 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.825082 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6nlv\" (UniqueName: \"kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.825109 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.825404 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs" (OuterVolumeSpecName: "logs") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.825597 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.825643 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle\") pod \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\" (UID: \"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4\") " Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.826289 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.829947 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv" (OuterVolumeSpecName: "kube-api-access-h6nlv") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "kube-api-access-h6nlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.854912 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data" (OuterVolumeSpecName: "config-data") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.857548 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.898862 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.901833 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" (UID: "38e07fb0-7023-49ad-9e0a-e0ee7f565ef4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.918616 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0692d4c-3e74-48d9-8066-d3c22d037012" path="/var/lib/kubelet/pods/b0692d4c-3e74-48d9-8066-d3c22d037012/volumes" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.930474 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.930522 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6nlv\" (UniqueName: \"kubernetes.io/projected/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-kube-api-access-h6nlv\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.930535 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.930546 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:21 crc kubenswrapper[4909]: I1128 16:35:21.930558 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.074991 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.748326 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38e07fb0-7023-49ad-9e0a-e0ee7f565ef4","Type":"ContainerDied","Data":"079407fbe7f56713f49fa96a506e8dd01a74cc28ee1b68d6b34075081b559e0f"} Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.748358 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.748786 4909 scope.go:117] "RemoveContainer" containerID="b32756291bf15b5830af5975d69130c26a296034d720c73a8689c03ff65ea3dd" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.750402 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerStarted","Data":"34cdf5b11d6117bafb37ddcc3824f5ce702d1b9711769b9e114b66075bba4f47"} Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.750457 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerStarted","Data":"09cb7681bd82577f5dec1afd70b7dfc60e7c497bc0efb2d3202eab82a5623018"} Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.750474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerStarted","Data":"6a0fe3c93870048d7b5a5cbaa3d6a3d69d63bbddc0cc7161d897e534dd32e64d"} Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.774844 4909 scope.go:117] "RemoveContainer" containerID="66a4f9480caa4bf88a2395446821189a734d4ac714eafbf43f5d8cfef52a1d61" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.777302 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.7772820089999999 podStartE2EDuration="1.777282009s" podCreationTimestamp="2025-11-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:22.774183716 +0000 UTC m=+1505.170868260" watchObservedRunningTime="2025-11-28 16:35:22.777282009 +0000 UTC m=+1505.173966533" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.794446 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.804451 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.820182 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:22 crc kubenswrapper[4909]: E1128 16:35:22.820711 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-log" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.820731 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-log" Nov 28 16:35:22 crc kubenswrapper[4909]: E1128 16:35:22.820814 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-api" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.820822 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-api" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.821040 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-log" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.821056 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" containerName="nova-api-api" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 
16:35:22.822299 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.828186 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.828253 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.828646 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847379 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847453 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847493 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfzwk\" (UniqueName: \"kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847518 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847599 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.847678 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.856268 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.949995 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950101 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfzwk\" (UniqueName: 
\"kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950181 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950264 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950364 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950406 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.950424 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.954381 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.954456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.954551 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.956820 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:22 crc kubenswrapper[4909]: I1128 16:35:22.968847 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfzwk\" (UniqueName: 
\"kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk\") pod \"nova-api-0\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " pod="openstack/nova-api-0" Nov 28 16:35:23 crc kubenswrapper[4909]: I1128 16:35:23.150385 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:23 crc kubenswrapper[4909]: I1128 16:35:23.627702 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:23 crc kubenswrapper[4909]: I1128 16:35:23.760434 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerStarted","Data":"3300409b6cb731b3216e287bc8588cf577b91e17d54c3318d7cace8d3b1223ef"} Nov 28 16:35:23 crc kubenswrapper[4909]: I1128 16:35:23.919065 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38e07fb0-7023-49ad-9e0a-e0ee7f565ef4" path="/var/lib/kubelet/pods/38e07fb0-7023-49ad-9e0a-e0ee7f565ef4/volumes" Nov 28 16:35:24 crc kubenswrapper[4909]: I1128 16:35:24.776454 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerStarted","Data":"edce0e3a1b79a461c6e384cfefd0cbf3c0e7f50280e3c51aafc961a31f14493c"} Nov 28 16:35:24 crc kubenswrapper[4909]: I1128 16:35:24.776882 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerStarted","Data":"bcd3a169e67b44354a85ac02fdf79896704f5e85915fcde17e813b5bf5c5d5ac"} Nov 28 16:35:24 crc kubenswrapper[4909]: I1128 16:35:24.800698 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8006736869999997 podStartE2EDuration="2.800673687s" podCreationTimestamp="2025-11-28 16:35:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:24.794968074 +0000 UTC m=+1507.191652608" watchObservedRunningTime="2025-11-28 16:35:24.800673687 +0000 UTC m=+1507.197358211" Nov 28 16:35:25 crc kubenswrapper[4909]: I1128 16:35:25.079914 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:35:26 crc kubenswrapper[4909]: I1128 16:35:26.443900 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:35:26 crc kubenswrapper[4909]: I1128 16:35:26.444224 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:35:30 crc kubenswrapper[4909]: I1128 16:35:30.080067 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:35:30 crc kubenswrapper[4909]: I1128 16:35:30.109677 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:35:30 crc kubenswrapper[4909]: I1128 16:35:30.923717 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:35:31 crc kubenswrapper[4909]: I1128 16:35:31.444141 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:35:31 crc kubenswrapper[4909]: I1128 16:35:31.444528 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 
16:35:32 crc kubenswrapper[4909]: I1128 16:35:32.481856 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:32 crc kubenswrapper[4909]: I1128 16:35:32.481875 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:33 crc kubenswrapper[4909]: I1128 16:35:33.150853 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:33 crc kubenswrapper[4909]: I1128 16:35:33.150908 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:34 crc kubenswrapper[4909]: I1128 16:35:34.163829 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:34 crc kubenswrapper[4909]: I1128 16:35:34.163845 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.647335 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.649414 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.664211 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.833242 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.833354 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.833639 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c74vr\" (UniqueName: \"kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.923544 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.935486 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.935636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.935881 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c74vr\" (UniqueName: \"kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.936852 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.936908 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " 
pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.964342 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c74vr\" (UniqueName: \"kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr\") pod \"redhat-marketplace-k9d5p\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:35 crc kubenswrapper[4909]: I1128 16:35:35.980106 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:36 crc kubenswrapper[4909]: I1128 16:35:36.483210 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:36 crc kubenswrapper[4909]: W1128 16:35:36.491245 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb32a597a_2269_4e7d_be36_0ac0c9cea132.slice/crio-1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7 WatchSource:0}: Error finding container 1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7: Status 404 returned error can't find the container with id 1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7 Nov 28 16:35:36 crc kubenswrapper[4909]: I1128 16:35:36.946679 4909 generic.go:334] "Generic (PLEG): container finished" podID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerID="85bc892f485ead62ff7a3454236fc204edfbf282d2ecb39184a891c168d3bf1b" exitCode=0 Nov 28 16:35:36 crc kubenswrapper[4909]: I1128 16:35:36.946747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerDied","Data":"85bc892f485ead62ff7a3454236fc204edfbf282d2ecb39184a891c168d3bf1b"} Nov 28 16:35:36 crc kubenswrapper[4909]: I1128 16:35:36.947068 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerStarted","Data":"1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7"} Nov 28 16:35:38 crc kubenswrapper[4909]: I1128 16:35:38.968869 4909 generic.go:334] "Generic (PLEG): container finished" podID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerID="fd84d749d965cf5cee571a2a5d5cf252fed713747b600b28bdfecaeac7232d7b" exitCode=0 Nov 28 16:35:38 crc kubenswrapper[4909]: I1128 16:35:38.969044 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerDied","Data":"fd84d749d965cf5cee571a2a5d5cf252fed713747b600b28bdfecaeac7232d7b"} Nov 28 16:35:39 crc kubenswrapper[4909]: I1128 16:35:39.980956 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerStarted","Data":"8ac297b5d405d834d8680a5f9f28838b752040016567cfba2f20902c9525c643"} Nov 28 16:35:40 crc kubenswrapper[4909]: I1128 16:35:40.006714 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k9d5p" podStartSLOduration=2.547003762 podStartE2EDuration="5.006688657s" podCreationTimestamp="2025-11-28 16:35:35 +0000 UTC" firstStartedPulling="2025-11-28 16:35:36.949856464 +0000 UTC m=+1519.346540988" 
lastFinishedPulling="2025-11-28 16:35:39.409541359 +0000 UTC m=+1521.806225883" observedRunningTime="2025-11-28 16:35:39.998443005 +0000 UTC m=+1522.395127539" watchObservedRunningTime="2025-11-28 16:35:40.006688657 +0000 UTC m=+1522.403373181" Nov 28 16:35:41 crc kubenswrapper[4909]: I1128 16:35:41.450749 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:35:41 crc kubenswrapper[4909]: I1128 16:35:41.460947 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:35:41 crc kubenswrapper[4909]: I1128 16:35:41.470983 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:35:42 crc kubenswrapper[4909]: I1128 16:35:42.007466 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:35:43 crc kubenswrapper[4909]: I1128 16:35:43.158510 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:35:43 crc kubenswrapper[4909]: I1128 16:35:43.160086 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:35:43 crc kubenswrapper[4909]: I1128 16:35:43.161498 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:35:43 crc kubenswrapper[4909]: I1128 16:35:43.167388 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:35:44 crc kubenswrapper[4909]: I1128 16:35:44.020021 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:35:44 crc kubenswrapper[4909]: I1128 16:35:44.028859 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:35:45 crc kubenswrapper[4909]: I1128 16:35:45.981121 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:45 crc kubenswrapper[4909]: I1128 16:35:45.981373 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:46 crc kubenswrapper[4909]: I1128 16:35:46.033172 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:46 crc kubenswrapper[4909]: I1128 16:35:46.119359 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:46 crc kubenswrapper[4909]: I1128 16:35:46.273273 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:48 crc kubenswrapper[4909]: I1128 16:35:48.077873 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-k9d5p" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="registry-server" containerID="cri-o://8ac297b5d405d834d8680a5f9f28838b752040016567cfba2f20902c9525c643" gracePeriod=2 Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.092373 4909 generic.go:334] "Generic (PLEG): container finished" podID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerID="8ac297b5d405d834d8680a5f9f28838b752040016567cfba2f20902c9525c643" exitCode=0 Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.092440 4909 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerDied","Data":"8ac297b5d405d834d8680a5f9f28838b752040016567cfba2f20902c9525c643"} Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.093058 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k9d5p" event={"ID":"b32a597a-2269-4e7d-be36-0ac0c9cea132","Type":"ContainerDied","Data":"1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7"} Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.093076 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dac9be5bd5940f5c0fc3ad65306d19ef4a497257c6fa372f162a03de76028d7" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.107239 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.290016 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c74vr\" (UniqueName: \"kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr\") pod \"b32a597a-2269-4e7d-be36-0ac0c9cea132\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.290142 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities\") pod \"b32a597a-2269-4e7d-be36-0ac0c9cea132\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.290274 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content\") pod \"b32a597a-2269-4e7d-be36-0ac0c9cea132\" (UID: \"b32a597a-2269-4e7d-be36-0ac0c9cea132\") " Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.291196 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities" (OuterVolumeSpecName: "utilities") pod "b32a597a-2269-4e7d-be36-0ac0c9cea132" (UID: "b32a597a-2269-4e7d-be36-0ac0c9cea132"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.306252 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr" (OuterVolumeSpecName: "kube-api-access-c74vr") pod "b32a597a-2269-4e7d-be36-0ac0c9cea132" (UID: "b32a597a-2269-4e7d-be36-0ac0c9cea132"). InnerVolumeSpecName "kube-api-access-c74vr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.312795 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b32a597a-2269-4e7d-be36-0ac0c9cea132" (UID: "b32a597a-2269-4e7d-be36-0ac0c9cea132"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.392271 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.392300 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c74vr\" (UniqueName: \"kubernetes.io/projected/b32a597a-2269-4e7d-be36-0ac0c9cea132-kube-api-access-c74vr\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:49 crc kubenswrapper[4909]: I1128 16:35:49.392313 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b32a597a-2269-4e7d-be36-0ac0c9cea132-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:50 crc kubenswrapper[4909]: I1128 16:35:50.099713 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k9d5p" Nov 28 16:35:50 crc kubenswrapper[4909]: I1128 16:35:50.126822 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:50 crc kubenswrapper[4909]: I1128 16:35:50.140353 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-k9d5p"] Nov 28 16:35:51 crc kubenswrapper[4909]: I1128 16:35:51.913123 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" path="/var/lib/kubelet/pods/b32a597a-2269-4e7d-be36-0ac0c9cea132/volumes" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.807731 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:03 crc kubenswrapper[4909]: E1128 16:36:03.808872 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="extract-content" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.808894 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="extract-content" Nov 28 16:36:03 crc kubenswrapper[4909]: E1128 16:36:03.808912 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="extract-utilities" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.808920 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="extract-utilities" Nov 28 16:36:03 crc kubenswrapper[4909]: E1128 16:36:03.808939 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="registry-server" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.808947 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="registry-server" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.809197 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32a597a-2269-4e7d-be36-0ac0c9cea132" containerName="registry-server" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.810549 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.839496 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.841054 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.854703 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.883786 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.930489 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.930700 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" containerName="openstackclient" containerID="cri-o://15d6d7bb76cdcf47fdb12971445027a116e8951d0899335b43ee4f4fb9c7586a" gracePeriod=2 Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.949512 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986803 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986851 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986897 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986923 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwckv\" (UniqueName: \"kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " 
pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.986970 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.987005 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6zdj\" (UniqueName: \"kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.987077 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.987095 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:03 crc kubenswrapper[4909]: I1128 16:36:03.987111 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.093877 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.093940 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.093966 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwckv\" (UniqueName: \"kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094003 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094039 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6zdj\" (UniqueName: \"kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094118 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094144 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094168 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094215 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.094248 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.097083 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.101644 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.107160 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.108533 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.110135 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.112302 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.116210 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.128427 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.186603 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6zdj\" (UniqueName: \"kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj\") pod \"barbican-keystone-listener-8588dd4f7d-772fj\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.198134 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwckv\" (UniqueName: \"kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv\") pod \"barbican-worker-84ff6c46f-q849h\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") " pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.246454 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.367771 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.368515 4909 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/ovsdbserver-sb-0" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="openstack-network-exporter" containerID="cri-o://11ac9a6b55dccdabe7e64c630e4c33e9f41f6c8c26d42b4e367805b2de03dbc8" gracePeriod=300 Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.435731 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.465128 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.473510 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance2631-account-delete-2c2fs"] Nov 28 16:36:04 crc kubenswrapper[4909]: E1128 16:36:04.473913 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" containerName="openstackclient" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.473929 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" containerName="openstackclient" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.474134 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" containerName="openstackclient" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.474795 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.489516 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.491225 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.501126 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance2631-account-delete-2c2fs"] Nov 28 16:36:04 crc kubenswrapper[4909]: E1128 16:36:04.509673 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:04 crc kubenswrapper[4909]: E1128 16:36:04.509752 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data podName:7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:05.009729088 +0000 UTC m=+1547.406413612 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data") pod "rabbitmq-server-0" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444") : configmap "rabbitmq-config-data" not found Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.511741 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="ovsdbserver-sb" containerID="cri-o://f3031d38398a1299821a38dba71739cd77b4202204dd1cd5dd6367b419b14d7b" gracePeriod=300 Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.547700 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.611152 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.611361 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.611496 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.611642 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8lx5\" (UniqueName: \"kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.611917 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.612005 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.613256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts\") pod 
\"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.613361 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpx9c\" (UniqueName: \"kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c\") pod \"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.613462 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.691557 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.693286 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.715671 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.716581 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.716692 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.716797 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8lx5\" (UniqueName: \"kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.716928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.717015 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom\") pod 
\"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.717113 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts\") pod \"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.717197 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpx9c\" (UniqueName: \"kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c\") pod \"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.717367 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.722428 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.724700 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.725342 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts\") pod \"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.737171 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.738128 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.738220 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: 
\"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.743761 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.746797 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.797586 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpx9c\" (UniqueName: \"kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c\") pod \"glance2631-account-delete-2c2fs\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.806922 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.807136 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd" containerID="cri-o://604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" gracePeriod=30 Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.807252 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="openstack-network-exporter" containerID="cri-o://1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d" gracePeriod=30 Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.831236 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wttq4\" (UniqueName: \"kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.831422 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.940031 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8lx5\" (UniqueName: \"kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5\") pod \"barbican-api-65c6d9c7fd-627g9\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") " pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.950436 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.978072 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wttq4\" (UniqueName: \"kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.978458 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:04 crc kubenswrapper[4909]: I1128 16:36:04.978239 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.007170 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.145301 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wz42m"] Nov 28 16:36:05 crc kubenswrapper[4909]: E1128 16:36:05.150577 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:05 crc kubenswrapper[4909]: E1128 16:36:05.150723 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data podName:7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:06.150696436 +0000 UTC m=+1548.547380960 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data") pod "rabbitmq-server-0" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444") : configmap "rabbitmq-config-data" not found Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.171458 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wttq4\" (UniqueName: \"kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4\") pod \"cinder937d-account-delete-4rdjd\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.232734 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wz42m"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.330123 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.342063 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-cj8sl"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.356022 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bdf7de93-de28-49b8-b83f-1174c23bbd2f/ovsdbserver-sb/0.log" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.356068 4909 generic.go:334] "Generic (PLEG): container finished" podID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerID="11ac9a6b55dccdabe7e64c630e4c33e9f41f6c8c26d42b4e367805b2de03dbc8" exitCode=2 Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.356087 4909 generic.go:334] "Generic (PLEG): container finished" podID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerID="f3031d38398a1299821a38dba71739cd77b4202204dd1cd5dd6367b419b14d7b" exitCode=143 Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.356105 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerDied","Data":"11ac9a6b55dccdabe7e64c630e4c33e9f41f6c8c26d42b4e367805b2de03dbc8"} Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.356140 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerDied","Data":"f3031d38398a1299821a38dba71739cd77b4202204dd1cd5dd6367b419b14d7b"} Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.420833 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-cj8sl"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.494795 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement3326-account-delete-6mrc7"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.504597 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.593778 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement3326-account-delete-6mrc7"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.642890 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ngll9"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.662540 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.662625 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9sgf\" (UniqueName: \"kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.666058 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ngll9"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.713523 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.714186 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="openstack-network-exporter" containerID="cri-o://8e03e0c2cfe2a76da7c2c9a3025b7a0ea43754fa7aeb32ae6610a67be0eb8a43" gracePeriod=300 Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.731837 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.732041 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="dnsmasq-dns" containerID="cri-o://a20717125aece580e07ee96759127e5cfd62fd2d73b26608a64fc09ab56c1ffa" gracePeriod=10 Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.753716 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-fcstr"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.764641 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.764858 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9sgf\" (UniqueName: \"kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.767593 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.777796 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron8770-account-delete-9m56t"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.778890 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.796373 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-fcstr"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.830570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9sgf\" (UniqueName: \"kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf\") pod \"placement3326-account-delete-6mrc7\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.865396 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="ovsdbserver-nb" containerID="cri-o://6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6" gracePeriod=300 Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.865554 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron8770-account-delete-9m56t"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.896466 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican9799-account-delete-z2p59"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.902366 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.907862 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.925513 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="148d191b-98d8-4e26-a335-1bfb373f4f07" path="/var/lib/kubelet/pods/148d191b-98d8-4e26-a335-1bfb373f4f07/volumes" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.935758 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29c42f34-c4bc-433c-b0d7-a0a8acf595db" path="/var/lib/kubelet/pods/29c42f34-c4bc-433c-b0d7-a0a8acf595db/volumes" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.940066 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d62f293-d022-4d92-915b-f83c3fa157a7" path="/var/lib/kubelet/pods/2d62f293-d022-4d92-915b-f83c3fa157a7/volumes" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.941285 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff8acb59-a082-400b-a87a-f4ef8cfa22f4" path="/var/lib/kubelet/pods/ff8acb59-a082-400b-a87a-f4ef8cfa22f4/volumes" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.948871 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican9799-account-delete-z2p59"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.971421 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.971565 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgqlh\" (UniqueName: \"kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.999132 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-fg65t"] Nov 28 16:36:05 crc kubenswrapper[4909]: I1128 16:36:05.999393 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-fg65t" podUID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" containerName="openstack-network-exporter" containerID="cri-o://31efa2915b2d01df78595f44826736718a1033b3247d499a7a9d8cc17106d2a1" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.008245 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-cpxgj"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.033583 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-cpxgj"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.053272 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.062720 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.073823 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074047 4909 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-log" containerID="cri-o://0805f4bc86dd00471ce300a5735ca911ab4a9a41d60ea124ead1fd0e3fd4ccbe" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074222 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074403 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-httpd" containerID="cri-o://06ec10e870b78a9508fdc0f9af0d0769bace54567bdcb85ffc77bf9a218d7d6e" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074509 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgqlh\" (UniqueName: \"kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074545 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wdw8\" (UniqueName: \"kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.074743 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.075373 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.087385 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-6wh46"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.099770 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgqlh\" (UniqueName: \"kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh\") pod \"neutron8770-account-delete-9m56t\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.106650 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-6wh46"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.118339 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 
16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.118626 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7bcd585886-f6h7k" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-log" containerID="cri-o://f5457f347b25c89d470eee3116b8c1baa0d18385fa471e46a66b59d37d629001" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.119030 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7bcd585886-f6h7k" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-api" containerID="cri-o://b1b648d707bec46e03074f6ddbe73bc4787a1ff840f797f11edd6e2f52984f64" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.131626 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.133281 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.174764 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.175166 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="cinder-scheduler" containerID="cri-o://498c5080ce90e9a7105e201c315e2156da6b516e7e542f7334041b20bfa59f28" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.175349 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="probe" containerID="cri-o://6965ce3a18191ccba9ccd72339cd48bef0713e889950e45db1e00d6f157854c1" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.228880 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.230187 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-server" containerID="cri-o://0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.230899 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231049 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="swift-recon-cron" containerID="cri-o://d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231096 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-updater" containerID="cri-o://5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231161 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-auditor" containerID="cri-o://e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231222 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="rsync" containerID="cri-o://98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231236 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-replicator" containerID="cri-o://4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231303 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-expirer" containerID="cri-o://a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231320 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-server" containerID="cri-o://0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231350 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-updater" containerID="cri-o://dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231397 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-reaper" containerID="cri-o://f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231405 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-auditor" 
containerID="cri-o://f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231474 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-replicator" containerID="cri-o://7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231486 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-auditor" containerID="cri-o://ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231524 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-server" containerID="cri-o://ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.231559 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-replicator" containerID="cri-o://2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.232414 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bdf7de93-de28-49b8-b83f-1174c23bbd2f/ovsdbserver-sb/0.log" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.232487 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.233762 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.233910 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.234025 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf79k\" (UniqueName: \"kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.234208 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wdw8\" (UniqueName: \"kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.235237 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.250533 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.251729 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data podName:7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:08.25169857 +0000 UTC m=+1550.648383084 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data") pod "rabbitmq-server-0" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444") : configmap "rabbitmq-config-data" not found Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.255888 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wdw8\" (UniqueName: \"kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8\") pod \"barbican9799-account-delete-z2p59\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.272124 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.298354 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.298786 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api-log" containerID="cri-o://d938862fe9fc3e6327eed52ecb437574cdd14b5fddf79ca390b9bf6e50d98375" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.299472 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api" containerID="cri-o://29762398aa81300aa7e6fa97b5acccc7e5d16e4234ca8d5ea87d42654450084b" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.319992 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.320393 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-log" containerID="cri-o://257ba16eb5dc11579d07b9316fe274af6b54797db9a8db896742e423617ab540" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.320598 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-httpd" containerID="cri-o://30fbad84f804b7ada9da16d8ee037dd6c5bb06b55551d23a9f96ea3c5222b69f" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.337679 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.337793 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.337891 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" 
(UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.337939 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.338096 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q68xh\" (UniqueName: \"kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.338117 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.380303 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.380422 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf79k\" (UniqueName: \"kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.383925 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.391430 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.392424 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts" (OuterVolumeSpecName: "scripts") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.405025 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh" (OuterVolumeSpecName: "kube-api-access-q68xh") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). 
InnerVolumeSpecName "kube-api-access-q68xh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.432083 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf79k\" (UniqueName: \"kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k\") pod \"novaapiab11-account-delete-2d7kv\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.439649 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.447614 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-55f6d745d5-tgbm7" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-api" containerID="cri-o://84ef1b544276823c5c91a1406dc17348087fabba68d10b1561cec7a3a87c25bd" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.448102 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-55f6d745d5-tgbm7" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-httpd" containerID="cri-o://a3fd76fee056f26d16128b3c7dd903dc417db926bafd7b4cc42bf63262cd356c" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.455706 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.457977 4909 generic.go:334] "Generic (PLEG): container finished" podID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerID="a20717125aece580e07ee96759127e5cfd62fd2d73b26608a64fc09ab56c1ffa" exitCode=0 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.458102 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" event={"ID":"20c5e3ee-fe01-49c7-96fd-153897da815e","Type":"ContainerDied","Data":"a20717125aece580e07ee96759127e5cfd62fd2d73b26608a64fc09ab56c1ffa"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.461143 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"] Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.462730 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="openstack-network-exporter" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.462757 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="openstack-network-exporter" Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.462819 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="ovsdbserver-sb" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.462826 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="ovsdbserver-sb" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.463226 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" 
containerName="openstack-network-exporter" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.463243 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" containerName="ovsdbserver-sb" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.464862 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.466630 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fg65t_f69e804c-fdc4-4b8f-86f3-d497612f42b8/openstack-network-exporter/0.log" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.466689 4909 generic.go:334] "Generic (PLEG): container finished" podID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" containerID="31efa2915b2d01df78595f44826736718a1033b3247d499a7a9d8cc17106d2a1" exitCode=2 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.466777 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fg65t" event={"ID":"f69e804c-fdc4-4b8f-86f3-d497612f42b8","Type":"ContainerDied","Data":"31efa2915b2d01df78595f44826736718a1033b3247d499a7a9d8cc17106d2a1"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.473728 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerStarted","Data":"f0d9dd166d658f22876e770c6d80529e6bccc330def0c2d462d37a796ac8585c"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.474919 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-fn62x"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.485175 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.485439 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs\") pod \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\" (UID: \"bdf7de93-de28-49b8-b83f-1174c23bbd2f\") " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.485724 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fksp\" (UniqueName: \"kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.485873 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.487513 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q68xh\" (UniqueName: \"kubernetes.io/projected/bdf7de93-de28-49b8-b83f-1174c23bbd2f-kube-api-access-q68xh\") on node \"crc\" 
DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.487550 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.487564 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.487576 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.490755 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config" (OuterVolumeSpecName: "config") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.492209 4909 generic.go:334] "Generic (PLEG): container finished" podID="d7651107-0120-4611-87d0-be009f3749d7" containerID="f5457f347b25c89d470eee3116b8c1baa0d18385fa471e46a66b59d37d629001" exitCode=143 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.492373 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerDied","Data":"f5457f347b25c89d470eee3116b8c1baa0d18385fa471e46a66b59d37d629001"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.516382 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-hqb8m"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.517901 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.518581 4909 generic.go:334] "Generic (PLEG): container finished" podID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerID="0805f4bc86dd00471ce300a5735ca911ab4a9a41d60ea124ead1fd0e3fd4ccbe" exitCode=143 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.518630 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerDied","Data":"0805f4bc86dd00471ce300a5735ca911ab4a9a41d60ea124ead1fd0e3fd4ccbe"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.526279 4909 generic.go:334] "Generic (PLEG): container finished" podID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerID="1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d" exitCode=2 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.526364 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerDied","Data":"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.529641 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.529818 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bdf7de93-de28-49b8-b83f-1174c23bbd2f/ovsdbserver-sb/0.log" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.530135 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.530447 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bdf7de93-de28-49b8-b83f-1174c23bbd2f","Type":"ContainerDied","Data":"5ee73e3a160a074a846c2f1633fd2786b1e1fdf857e8650508847fec4846e0a1"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.530513 4909 scope.go:117] "RemoveContainer" containerID="11ac9a6b55dccdabe7e64c630e4c33e9f41f6c8c26d42b4e367805b2de03dbc8" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.540894 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.561848 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9a3a5941-5c86-4a65-be1e-26327ca990ad/ovsdbserver-nb/0.log" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.561895 4909 generic.go:334] "Generic (PLEG): container finished" podID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerID="8e03e0c2cfe2a76da7c2c9a3025b7a0ea43754fa7aeb32ae6610a67be0eb8a43" exitCode=2 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.561912 4909 generic.go:334] "Generic (PLEG): container finished" podID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerID="6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6" exitCode=143 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.565764 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerDied","Data":"8e03e0c2cfe2a76da7c2c9a3025b7a0ea43754fa7aeb32ae6610a67be0eb8a43"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.565965 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerDied","Data":"6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6"} Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.583103 4909 generic.go:334] "Generic (PLEG): container finished" podID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" containerID="15d6d7bb76cdcf47fdb12971445027a116e8951d0899335b43ee4f4fb9c7586a" exitCode=137 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.590545 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.590692 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fksp\" (UniqueName: \"kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.590796 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.590809 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdf7de93-de28-49b8-b83f-1174c23bbd2f-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.591358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.622038 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9fksp\" (UniqueName: \"kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp\") pod \"novacell077a6-account-delete-dqcds\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.634441 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-hqb8m"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.645362 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.659417 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.695714 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-fn62x"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.697271 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.722327 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.722444 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.724093 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" containerID="cri-o://1be6a34dec3cfc7a9c5a2a82788430cdb9b7ee059f8aeacb143350b3dd68f3c7" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.724568 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-server" containerID="cri-o://95eacaaf5af98b7623dbb633b828928620aa97b977799e32b9e1bc8948b35490" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.746687 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.760994 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.791792 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-92jzz"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.817836 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-92jzz"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.838432 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.838733 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-log" containerID="cri-o://bcd3a169e67b44354a85ac02fdf79896704f5e85915fcde17e813b5bf5c5d5ac" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.839505 4909 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/nova-api-0" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-api" containerID="cri-o://edce0e3a1b79a461c6e384cfefd0cbf3c0e7f50280e3c51aafc961a31f14493c" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.849166 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.849563 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener-log" containerID="cri-o://52e65448aef539353daa8f7db84d105cd71f70da4604565423fd5950afa26a6b" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.850370 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener" containerID="cri-o://61af0c4690ad10f578c11a1874dcaa8f66ff04b25238b214fda8321e55f07b14" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.855327 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.866781 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.886417 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.886669 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6c95ffb47-q5ls2" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker-log" containerID="cri-o://dbc15b80c1cd50c3f062d20e4bbfd0c4ab351bae72bf60617a069c1be00aaa4b" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.887011 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6c95ffb47-q5ls2" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker" containerID="cri-o://2dedb170588deaa6f11bf8d0e9ccb4ac0fb1f6ba18fbbaac5554659c70446bce" gracePeriod=30 Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.909289 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:06 crc kubenswrapper[4909]: E1128 16:36:06.909364 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data podName:02c83d05-a6ce-4c22-9015-91c0a766a518 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:07.409327126 +0000 UTC m=+1549.806011650 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data") pod "rabbitmq-cell1-server-0" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.912241 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.941582 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.946527 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-e892-account-create-update-rwgsj"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.953824 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-e892-account-create-update-rwgsj"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.960141 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.960229 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "bdf7de93-de28-49b8-b83f-1174c23bbd2f" (UID: "bdf7de93-de28-49b8-b83f-1174c23bbd2f"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.968335 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"] Nov 28 16:36:06 crc kubenswrapper[4909]: I1128 16:36:06.986408 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd" containerID="cri-o://afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.006579 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.006609 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf7de93-de28-49b8-b83f-1174c23bbd2f-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.011968 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.012167 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log" containerID="cri-o://09cb7681bd82577f5dec1afd70b7dfc60e7c497bc0efb2d3202eab82a5623018" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.012634 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata" containerID="cri-o://34cdf5b11d6117bafb37ddcc3824f5ce702d1b9711769b9e114b66075bba4f47" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.018879 4909 scope.go:117] "RemoveContainer" containerID="f3031d38398a1299821a38dba71739cd77b4202204dd1cd5dd6367b419b14d7b" Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.036721 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.037102 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f6ddfdd4b-szlst" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api-log" containerID="cri-o://29d0bc179bbb27c3f9f6023ab4558b76e568b29b98ddf992b4f8391b462dd92d" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.037160 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f6ddfdd4b-szlst" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api" containerID="cri-o://3cd8ac1736c6fbc1977f593e8c58c7c95ab9e0dac8a3505b8acefff70b5cfba5" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: E1128 16:36:07.040285 4909 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 16:36:07 crc kubenswrapper[4909]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 16:36:07 crc kubenswrapper[4909]: + source /usr/local/bin/container-scripts/functions Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNBridge=br-int Nov 
28 16:36:07 crc kubenswrapper[4909]: ++ OVNRemote=tcp:localhost:6642 Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNEncapType=geneve Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNAvailabilityZones= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ EnableChassisAsGateway=true Nov 28 16:36:07 crc kubenswrapper[4909]: ++ PhysicalNetworks= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNHostName= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 16:36:07 crc kubenswrapper[4909]: ++ ovs_dir=/var/lib/openvswitch Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 16:36:07 crc kubenswrapper[4909]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + sleep 0.5 Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + cleanup_ovsdb_server_semaphore Nov 28 16:36:07 crc kubenswrapper[4909]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 16:36:07 crc kubenswrapper[4909]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-q2kt7" message=< Nov 28 16:36:07 crc kubenswrapper[4909]: Exiting ovsdb-server (5) [ OK ] Nov 28 16:36:07 crc kubenswrapper[4909]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 16:36:07 crc kubenswrapper[4909]: + source /usr/local/bin/container-scripts/functions Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNBridge=br-int Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNRemote=tcp:localhost:6642 Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNEncapType=geneve Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNAvailabilityZones= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ EnableChassisAsGateway=true Nov 28 16:36:07 crc kubenswrapper[4909]: ++ PhysicalNetworks= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNHostName= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 16:36:07 crc kubenswrapper[4909]: ++ ovs_dir=/var/lib/openvswitch Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 16:36:07 crc kubenswrapper[4909]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + sleep 0.5 Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + cleanup_ovsdb_server_semaphore Nov 28 16:36:07 crc kubenswrapper[4909]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 16:36:07 crc kubenswrapper[4909]: > Nov 28 16:36:07 crc kubenswrapper[4909]: E1128 16:36:07.040315 4909 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 16:36:07 crc kubenswrapper[4909]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 16:36:07 crc kubenswrapper[4909]: + source /usr/local/bin/container-scripts/functions Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNBridge=br-int Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNRemote=tcp:localhost:6642 Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNEncapType=geneve Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNAvailabilityZones= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ EnableChassisAsGateway=true Nov 28 16:36:07 crc kubenswrapper[4909]: ++ PhysicalNetworks= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ OVNHostName= Nov 28 16:36:07 crc kubenswrapper[4909]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 16:36:07 crc kubenswrapper[4909]: ++ ovs_dir=/var/lib/openvswitch Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 16:36:07 crc kubenswrapper[4909]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 16:36:07 crc kubenswrapper[4909]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + sleep 0.5 Nov 28 16:36:07 crc kubenswrapper[4909]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 16:36:07 crc kubenswrapper[4909]: + cleanup_ovsdb_server_semaphore Nov 28 16:36:07 crc kubenswrapper[4909]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 16:36:07 crc kubenswrapper[4909]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 16:36:07 crc kubenswrapper[4909]: > pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server" containerID="cri-o://96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.040337 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server" containerID="cri-o://96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.055836 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.062300 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.076316 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.076516 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="bac06af4-bbe1-482a-8815-14a9cf2a1699" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.105445 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f75mj"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.110204 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.110391 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerName="nova-cell1-conductor-conductor" containerID="cri-o://0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" gracePeriod=30 Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111401 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111447 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111487 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111533 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62hxf\" (UniqueName: \"kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111637 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.111688 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0\") pod \"20c5e3ee-fe01-49c7-96fd-153897da815e\" (UID: \"20c5e3ee-fe01-49c7-96fd-153897da815e\") " Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.122475 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-f75mj"] Nov 28 16:36:07 crc kubenswrapper[4909]: I1128 16:36:07.147124 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf" (OuterVolumeSpecName: "kube-api-access-62hxf") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). InnerVolumeSpecName "kube-api-access-62hxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.168200 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.168645 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerName="nova-cell0-conductor-conductor" containerID="cri-o://1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" gracePeriod=30 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.211275 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="rabbitmq" containerID="cri-o://ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266" gracePeriod=604800 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.213353 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.163:8080/healthcheck\": dial tcp 10.217.0.163:8080: connect: connection refused" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.213410 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-589b6f8979-wbls8" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.163:8080/healthcheck\": dial tcp 10.217.0.163:8080: connect: connection refused" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.225126 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="galera" containerID="cri-o://d3f40b1a27b1ea548b445a46b92770d9795f4ed9429685b38f64c4173b4e0c3f" gracePeriod=30 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.235080 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config\") pod \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.235252 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret\") pod \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.235292 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p4q5\" (UniqueName: \"kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5\") pod \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\" (UID: 
\"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.235361 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle\") pod \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\" (UID: \"f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae\") " Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.243341 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62hxf\" (UniqueName: \"kubernetes.io/projected/20c5e3ee-fe01-49c7-96fd-153897da815e-kube-api-access-62hxf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.244741 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dntf4"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.269099 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dntf4"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.284384 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5" (OuterVolumeSpecName: "kube-api-access-9p4q5") pod "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" (UID: "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae"). InnerVolumeSpecName "kube-api-access-9p4q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.319959 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.320139 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e8b95945-6169-4e44-861a-f4abd48a7161" containerName="nova-scheduler-scheduler" containerID="cri-o://bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" gracePeriod=30 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.326694 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.345361 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p4q5\" (UniqueName: \"kubernetes.io/projected/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-kube-api-access-9p4q5\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.345632 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.393174 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6 is running failed: container process not found" containerID="6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.394946 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6 is running failed: container process not found" containerID="6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.399596 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6 is running failed: container process not found" containerID="6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.399676 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="ovsdbserver-nb" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.409888 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" (UID: "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.411194 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.429147 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config" (OuterVolumeSpecName: "config") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.443914 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" (UID: "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.443969 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.453726 4909 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.453751 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.453762 4909 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.453772 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.453834 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.453878 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data podName:02c83d05-a6ce-4c22-9015-91c0a766a518 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:08.453863407 +0000 UTC m=+1550.850547921 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data") pod "rabbitmq-cell1-server-0" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.455727 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.478931 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.480908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" (UID: "f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: W1128 16:36:07.502279 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e95a7b6_74fd_4db5_bb83_b8e8f80a698f.slice/crio-e0692acbee18d2024d6649491c0e3feb865f0fd7c443f6d20eee0b1cf0d3ad8d WatchSource:0}: Error finding container e0692acbee18d2024d6649491c0e3feb865f0fd7c443f6d20eee0b1cf0d3ad8d: Status 404 returned error can't find the container with id e0692acbee18d2024d6649491c0e3feb865f0fd7c443f6d20eee0b1cf0d3ad8d Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.537273 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "20c5e3ee-fe01-49c7-96fd-153897da815e" (UID: "20c5e3ee-fe01-49c7-96fd-153897da815e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.558288 4909 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.558315 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.558324 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20c5e3ee-fe01-49c7-96fd-153897da815e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.598684 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerDied","Data":"6965ce3a18191ccba9ccd72339cd48bef0713e889950e45db1e00d6f157854c1"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.598684 4909 generic.go:334] "Generic (PLEG): container finished" podID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerID="6965ce3a18191ccba9ccd72339cd48bef0713e889950e45db1e00d6f157854c1" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.625432 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.625875 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-gbql4" event={"ID":"20c5e3ee-fe01-49c7-96fd-153897da815e","Type":"ContainerDied","Data":"f7812f891f5cd5938f3d04d22c5106203e9fdc5d055f01111bdcf8abbac0bec3"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.625934 4909 scope.go:117] "RemoveContainer" containerID="a20717125aece580e07ee96759127e5cfd62fd2d73b26608a64fc09ab56c1ffa" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.640804 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fg65t_f69e804c-fdc4-4b8f-86f3-d497612f42b8/openstack-network-exporter/0.log" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.640866 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.686517 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9a3a5941-5c86-4a65-be1e-26327ca990ad/ovsdbserver-nb/0.log" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.686595 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690511 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690534 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690543 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690555 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690565 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690575 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690582 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690589 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690594 4909 generic.go:334] "Generic (PLEG): container finished" 
podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690600 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690606 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690612 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690618 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.690623 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691040 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691080 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691112 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691122 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691130 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691140 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691149 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 
16:36:07.691157 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691166 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691174 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691182 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691191 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691200 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.691208 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.696690 4909 scope.go:117] "RemoveContainer" containerID="4c75e9e01c7896ba8550fe35a51a4e7f8f2739fa601933d4cd40f37cdc45075b" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.697260 4909 generic.go:334] "Generic (PLEG): container finished" podID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerID="52e65448aef539353daa8f7db84d105cd71f70da4604565423fd5950afa26a6b" exitCode=143 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.697300 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerDied","Data":"52e65448aef539353daa8f7db84d105cd71f70da4604565423fd5950afa26a6b"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.705247 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.712585 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9a3a5941-5c86-4a65-be1e-26327ca990ad/ovsdbserver-nb/0.log" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.712729 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.713400 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9a3a5941-5c86-4a65-be1e-26327ca990ad","Type":"ContainerDied","Data":"c8b638f19b86187965ecc2b21a7ed972377396f1caac26c27dd389e1e14da07a"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.719607 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fg65t_f69e804c-fdc4-4b8f-86f3-d497612f42b8/openstack-network-exporter/0.log" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.719679 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fg65t" event={"ID":"f69e804c-fdc4-4b8f-86f3-d497612f42b8","Type":"ContainerDied","Data":"4ac20f70f1463d6a742e63d0a2b9de508ef07b24cccd5e2683e667a4d6a7523f"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.719730 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fg65t" Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.720733 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.723330 4909 generic.go:334] "Generic (PLEG): container finished" podID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.723373 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerDied","Data":"96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.726735 4909 generic.go:334] "Generic (PLEG): container finished" podID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerID="dbc15b80c1cd50c3f062d20e4bbfd0c4ab351bae72bf60617a069c1be00aaa4b" exitCode=143 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.726779 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerDied","Data":"dbc15b80c1cd50c3f062d20e4bbfd0c4ab351bae72bf60617a069c1be00aaa4b"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.730753 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerDied","Data":"95eacaaf5af98b7623dbb633b828928620aa97b977799e32b9e1bc8948b35490"} Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.730759 4909 generic.go:334] "Generic (PLEG): container finished" podID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerID="95eacaaf5af98b7623dbb633b828928620aa97b977799e32b9e1bc8948b35490" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.730812 4909 generic.go:334] "Generic (PLEG): container finished" podID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerID="1be6a34dec3cfc7a9c5a2a82788430cdb9b7ee059f8aeacb143350b3dd68f3c7" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.730919 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerDied","Data":"1be6a34dec3cfc7a9c5a2a82788430cdb9b7ee059f8aeacb143350b3dd68f3c7"} Nov 28 16:36:08 crc 
kubenswrapper[4909]: I1128 16:36:07.751374 4909 generic.go:334] "Generic (PLEG): container finished" podID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerID="bcd3a169e67b44354a85ac02fdf79896704f5e85915fcde17e813b5bf5c5d5ac" exitCode=143
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.751444 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerDied","Data":"bcd3a169e67b44354a85ac02fdf79896704f5e85915fcde17e813b5bf5c5d5ac"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.755090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerStarted","Data":"e0692acbee18d2024d6649491c0e3feb865f0fd7c443f6d20eee0b1cf0d3ad8d"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762572 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762639 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762707 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762733 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762783 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762804 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762832 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762852 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762879 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762917 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.762973 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.763023 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.763149 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wtmw\" (UniqueName: \"kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw\") pod \"9a3a5941-5c86-4a65-be1e-26327ca990ad\" (UID: \"9a3a5941-5c86-4a65-be1e-26327ca990ad\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.763217 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shm9n\" (UniqueName: \"kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n\") pod \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\" (UID: \"f69e804c-fdc4-4b8f-86f3-d497612f42b8\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.765937 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.768349 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.771082 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config" (OuterVolumeSpecName: "config") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.771357 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.772023 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config" (OuterVolumeSpecName: "config") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.772481 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts" (OuterVolumeSpecName: "scripts") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.772846 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.773666 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.775095 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n" (OuterVolumeSpecName: "kube-api-access-shm9n") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "kube-api-access-shm9n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.778953 4909 scope.go:117] "RemoveContainer" containerID="8e03e0c2cfe2a76da7c2c9a3025b7a0ea43754fa7aeb32ae6610a67be0eb8a43"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.779643 4909 generic.go:334] "Generic (PLEG): container finished" podID="287e7e9a-0240-478e-a15b-b01122e79c32" containerID="d938862fe9fc3e6327eed52ecb437574cdd14b5fddf79ca390b9bf6e50d98375" exitCode=143
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.779679 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerDied","Data":"d938862fe9fc3e6327eed52ecb437574cdd14b5fddf79ca390b9bf6e50d98375"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.784885 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.785408 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerID="09cb7681bd82577f5dec1afd70b7dfc60e7c497bc0efb2d3202eab82a5623018" exitCode=143
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.785460 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerDied","Data":"09cb7681bd82577f5dec1afd70b7dfc60e7c497bc0efb2d3202eab82a5623018"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.787329 4909 generic.go:334] "Generic (PLEG): container finished" podID="00913f80-f496-44ec-a619-99129724cb89" containerID="257ba16eb5dc11579d07b9316fe274af6b54797db9a8db896742e423617ab540" exitCode=143
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.787386 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerDied","Data":"257ba16eb5dc11579d07b9316fe274af6b54797db9a8db896742e423617ab540"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.787701 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-gbql4"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.793950 4909 generic.go:334] "Generic (PLEG): container finished" podID="ffd60458-19af-464b-9649-57d25893f22a" containerID="29d0bc179bbb27c3f9f6023ab4558b76e568b29b98ddf992b4f8391b462dd92d" exitCode=143
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.794014 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerDied","Data":"29d0bc179bbb27c3f9f6023ab4558b76e568b29b98ddf992b4f8391b462dd92d"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.796593 4909 generic.go:334] "Generic (PLEG): container finished" podID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerID="a3fd76fee056f26d16128b3c7dd903dc417db926bafd7b4cc42bf63262cd356c" exitCode=0
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.796642 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerDied","Data":"a3fd76fee056f26d16128b3c7dd903dc417db926bafd7b4cc42bf63262cd356c"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.798292 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerStarted","Data":"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac"}
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.807019 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.808548 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw" (OuterVolumeSpecName: "kube-api-access-5wtmw") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "kube-api-access-5wtmw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.809197 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.810758 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:07.810782 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.819375 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.820475 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866749 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wtmw\" (UniqueName: \"kubernetes.io/projected/9a3a5941-5c86-4a65-be1e-26327ca990ad-kube-api-access-5wtmw\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866774 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shm9n\" (UniqueName: \"kubernetes.io/projected/f69e804c-fdc4-4b8f-86f3-d497612f42b8-kube-api-access-shm9n\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866783 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovn-rundir\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866794 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdb-rundir\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866802 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866814 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69e804c-fdc4-4b8f-86f3-d497612f42b8-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866822 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866830 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866861 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866872 4909 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f69e804c-fdc4-4b8f-86f3-d497612f42b8-ovs-rundir\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.866881 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a3a5941-5c86-4a65-be1e-26327ca990ad-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.869449 4909 scope.go:117] "RemoveContainer" containerID="6e2af8a1d96aee4df901387db3f0677372000cdb75cb35bf23b1e8474fd7bde6"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.891217 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="rabbitmq" containerID="cri-o://e404e875f3c6c8a15d87ad24861803ab1e659ac087607f8971106d0d6890fc63" gracePeriod=604800
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.897756 4909 scope.go:117] "RemoveContainer" containerID="31efa2915b2d01df78595f44826736718a1033b3247d499a7a9d8cc17106d2a1"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.903089 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.909501 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.923074 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "9a3a5941-5c86-4a65-be1e-26327ca990ad" (UID: "9a3a5941-5c86-4a65-be1e-26327ca990ad"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.946602 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" path="/var/lib/kubelet/pods/20c5e3ee-fe01-49c7-96fd-153897da815e/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.947317 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a22fca6-e32c-4df8-8240-d9c749c19261" path="/var/lib/kubelet/pods/6a22fca6-e32c-4df8-8240-d9c749c19261/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.947957 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="828d440a-4ae3-45ef-83fe-79866f3f2d8e" path="/var/lib/kubelet/pods/828d440a-4ae3-45ef-83fe-79866f3f2d8e/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.948868 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93ff38ac-f623-4932-aaa4-fde31d11a4ed" path="/var/lib/kubelet/pods/93ff38ac-f623-4932-aaa4-fde31d11a4ed/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.949358 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b41266-6099-4bf7-a26b-4a98b01aa9b6" path="/var/lib/kubelet/pods/a5b41266-6099-4bf7-a26b-4a98b01aa9b6/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.949868 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b69a490a-8b2d-405f-b41e-903268d44cca" path="/var/lib/kubelet/pods/b69a490a-8b2d-405f-b41e-903268d44cca/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.951004 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdf7de93-de28-49b8-b83f-1174c23bbd2f" path="/var/lib/kubelet/pods/bdf7de93-de28-49b8-b83f-1174c23bbd2f/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.953559 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf22088e-8ad0-4323-bca3-2b7bdb648bf9" path="/var/lib/kubelet/pods/bf22088e-8ad0-4323-bca3-2b7bdb648bf9/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.953815 4909 scope.go:117] "RemoveContainer" containerID="15d6d7bb76cdcf47fdb12971445027a116e8951d0899335b43ee4f4fb9c7586a"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.954961 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e042c4d0-9fc9-4e8d-a67f-6e3029444f4a" path="/var/lib/kubelet/pods/e042c4d0-9fc9-4e8d-a67f-6e3029444f4a/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.956196 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae" path="/var/lib/kubelet/pods/f8d630e7-c532-4fc8-b3f4-0a2a6b06f5ae/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.956787 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbc4977f-f846-4428-9fb6-558811c3e65b" path="/var/lib/kubelet/pods/fbc4977f-f846-4428-9fb6-558811c3e65b/volumes"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.969780 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.969810 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.969828 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a3a5941-5c86-4a65-be1e-26327ca990ad-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:07.976481 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "f69e804c-fdc4-4b8f-86f3-d497612f42b8" (UID: "f69e804c-fdc4-4b8f-86f3-d497612f42b8"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.037480 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.046084 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.067525 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-fg65t"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.071277 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69e804c-fdc4-4b8f-86f3-d497612f42b8-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.076448 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-fg65t"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.224261 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.224950 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.225333 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.225379 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.225570 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.236068 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.237847 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.237995 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.292808 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.292874 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data podName:7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:12.292861396 +0000 UTC m=+1554.689545920 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data") pod "rabbitmq-server-0" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444") : configmap "rabbitmq-config-data" not found
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.435722 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance2631-account-delete-2c2fs"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.456483 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"]
Nov 28 16:36:08 crc kubenswrapper[4909]: W1128 16:36:08.467996 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0f3cfb2_6884_4ef1_9844_cf494a2e21bb.slice/crio-e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb WatchSource:0}: Error finding container e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb: Status 404 returned error can't find the container with id e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb
Nov 28 16:36:08 crc kubenswrapper[4909]: W1128 16:36:08.473799 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2cc9842_6f8f_4afc_9895_2b7a75a9696c.slice/crio-e8bf24bda58e5121229915ceab0bc295fb4e4833801ddc66f26c95fbafcc8094 WatchSource:0}: Error finding container e8bf24bda58e5121229915ceab0bc295fb4e4833801ddc66f26c95fbafcc8094: Status 404 returned error can't find the container with id e8bf24bda58e5121229915ceab0bc295fb4e4833801ddc66f26c95fbafcc8094
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.476042 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"]
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.496036 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 16:36:08 crc kubenswrapper[4909]: E1128 16:36:08.496110 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data podName:02c83d05-a6ce-4c22-9015-91c0a766a518 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:10.496094928 +0000 UTC m=+1552.892779452 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data") pod "rabbitmq-cell1-server-0" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.506859 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron8770-account-delete-9m56t"]
Nov 28 16:36:08 crc kubenswrapper[4909]: W1128 16:36:08.511383 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b472e4d_2724_4ea4_93c9_5552d92af793.slice/crio-ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767 WatchSource:0}: Error finding container ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767: Status 404 returned error can't find the container with id ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.567254 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement3326-account-delete-6mrc7"]
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.840242 4909 generic.go:334] "Generic (PLEG): container finished" podID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerID="d3f40b1a27b1ea548b445a46b92770d9795f4ed9429685b38f64c4173b4e0c3f" exitCode=0
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.840609 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerDied","Data":"d3f40b1a27b1ea548b445a46b92770d9795f4ed9429685b38f64c4173b4e0c3f"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.840755 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-589b6f8979-wbls8"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.864853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerStarted","Data":"9af8cd104e3187ddf8c610b6776a27019a3e5c7984dbfbd2adf24572ee3f1171"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.874047 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.881139 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance2631-account-delete-2c2fs" event={"ID":"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb","Type":"ContainerStarted","Data":"e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.882425 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder937d-account-delete-4rdjd" event={"ID":"c2cc9842-6f8f-4afc-9895-2b7a75a9696c","Type":"ContainerStarted","Data":"e8bf24bda58e5121229915ceab0bc295fb4e4833801ddc66f26c95fbafcc8094"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.885597 4909 generic.go:334] "Generic (PLEG): container finished" podID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerID="2dedb170588deaa6f11bf8d0e9ccb4ac0fb1f6ba18fbbaac5554659c70446bce" exitCode=0
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.885644 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerDied","Data":"2dedb170588deaa6f11bf8d0e9ccb4ac0fb1f6ba18fbbaac5554659c70446bce"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.887559 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-589b6f8979-wbls8" event={"ID":"e792abf7-967c-4293-b5f4-f073b07c8cf1","Type":"ContainerDied","Data":"43391a917392eb96d2e2e8a7d61fdefd88f801c99a389ac0950971d172990617"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.887590 4909 scope.go:117] "RemoveContainer" containerID="95eacaaf5af98b7623dbb633b828928620aa97b977799e32b9e1bc8948b35490"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.887765 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-589b6f8979-wbls8"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.890876 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement3326-account-delete-6mrc7" event={"ID":"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f","Type":"ContainerStarted","Data":"abe14875a3d0a4033d9e1a171aa9a4afd425397efc4e742ac63ad06775ba8f35"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.893619 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerStarted","Data":"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.893652 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener-log" containerID="cri-o://c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938" gracePeriod=30
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.893707 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener" containerID="cri-o://e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa" gracePeriod=30
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.893677 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerStarted","Data":"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.905074 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron8770-account-delete-9m56t" event={"ID":"5b472e4d-2724-4ea4-93c9-5552d92af793","Type":"ContainerStarted","Data":"ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913736 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913812 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913837 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913928 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913970 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.913990 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.914009 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.914072 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc5q8\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8\") pod \"e792abf7-967c-4293-b5f4-f073b07c8cf1\" (UID: \"e792abf7-967c-4293-b5f4-f073b07c8cf1\") "
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.914562 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.915386 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.924506 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" podStartSLOduration=5.924486452 podStartE2EDuration="5.924486452s" podCreationTimestamp="2025-11-28 16:36:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:08.922730685 +0000 UTC m=+1551.319415229" watchObservedRunningTime="2025-11-28 16:36:08.924486452 +0000 UTC m=+1551.321170976"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.933226 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.936865 4909 scope.go:117] "RemoveContainer" containerID="1be6a34dec3cfc7a9c5a2a82788430cdb9b7ee059f8aeacb143350b3dd68f3c7"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.938506 4909 generic.go:334] "Generic (PLEG): container finished" podID="bac06af4-bbe1-482a-8815-14a9cf2a1699" containerID="d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc" exitCode=0
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.938568 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bac06af4-bbe1-482a-8815-14a9cf2a1699","Type":"ContainerDied","Data":"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.938600 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bac06af4-bbe1-482a-8815-14a9cf2a1699","Type":"ContainerDied","Data":"6369879d221d0173522f49293e5e5d88085918cc8e071a781bf1b66beb112e5f"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.938768 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.953643 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerStarted","Data":"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f"}
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.953813 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-84ff6c46f-q849h" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker-log" containerID="cri-o://a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac" gracePeriod=30
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.954638 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-84ff6c46f-q849h" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker" containerID="cri-o://993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f" gracePeriod=30
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.955131 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8" (OuterVolumeSpecName: "kube-api-access-mc5q8") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "kube-api-access-mc5q8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:08 crc kubenswrapper[4909]: I1128 16:36:08.980963 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-84ff6c46f-q849h" podStartSLOduration=5.980935412 podStartE2EDuration="5.980935412s" podCreationTimestamp="2025-11-28 16:36:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:08.971816417 +0000 UTC m=+1551.368500941" watchObservedRunningTime="2025-11-28 16:36:08.980935412 +0000 UTC m=+1551.377619936"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.018415 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs\") pod \"bac06af4-bbe1-482a-8815-14a9cf2a1699\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") "
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.018619 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle\") pod \"bac06af4-bbe1-482a-8815-14a9cf2a1699\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") "
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.018744 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data\") pod \"bac06af4-bbe1-482a-8815-14a9cf2a1699\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") "
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.018916 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs\") pod \"bac06af4-bbe1-482a-8815-14a9cf2a1699\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") "
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.019065 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc77c\" (UniqueName: \"kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c\") pod \"bac06af4-bbe1-482a-8815-14a9cf2a1699\" (UID: \"bac06af4-bbe1-482a-8815-14a9cf2a1699\") "
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.019718 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.019808 4909 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.019871 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc5q8\" (UniqueName: \"kubernetes.io/projected/e792abf7-967c-4293-b5f4-f073b07c8cf1-kube-api-access-mc5q8\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.019937 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e792abf7-967c-4293-b5f4-f073b07c8cf1-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.044008 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.052045 4909 scope.go:117] "RemoveContainer" containerID="d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.059531 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.069197 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c" (OuterVolumeSpecName: "kube-api-access-zc77c") pod "bac06af4-bbe1-482a-8815-14a9cf2a1699" (UID: "bac06af4-bbe1-482a-8815-14a9cf2a1699"). InnerVolumeSpecName "kube-api-access-zc77c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.070898 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.082412 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican9799-account-delete-z2p59"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.121562 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc77c\" (UniqueName: \"kubernetes.io/projected/bac06af4-bbe1-482a-8815-14a9cf2a1699-kube-api-access-zc77c\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: W1128 16:36:09.174189 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8fa2662b_eed3_4461_ba5d_d4554ca4a22b.slice/crio-94bf77be6b236b8999a881b64809cc5d3228f9ba7c55e297eb5bb70916649ff8 WatchSource:0}: Error finding container 94bf77be6b236b8999a881b64809cc5d3228f9ba7c55e297eb5bb70916649ff8: Status 404 returned error can't find the container with id 94bf77be6b236b8999a881b64809cc5d3228f9ba7c55e297eb5bb70916649ff8
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.231778 4909 scope.go:117] "RemoveContainer" containerID="d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"
Nov 28 16:36:09 crc kubenswrapper[4909]: E1128 16:36:09.232218 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc\": container with ID starting with d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc not found: ID does not exist" containerID="d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.232249 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc"} err="failed to get container status \"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc\": rpc error: code = NotFound desc = could not find container \"d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc\": container with ID starting with d5719a7de701581a9790f618ddf6bcdd49af95eea721edc447a568c570efffdc not found: ID does not exist"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.266455 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bac06af4-bbe1-482a-8815-14a9cf2a1699" (UID: "bac06af4-bbe1-482a-8815-14a9cf2a1699"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.325160 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.360162 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused"
Nov 28 16:36:09 crc kubenswrapper[4909]: E1128 16:36:09.462805 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 28 16:36:09 crc kubenswrapper[4909]: E1128 16:36:09.487985 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 28 16:36:09 crc kubenswrapper[4909]: E1128 16:36:09.505094 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 28 16:36:09 crc kubenswrapper[4909]: E1128 16:36:09.505282 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerName="nova-cell1-conductor-conductor"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.541269 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "bac06af4-bbe1-482a-8815-14a9cf2a1699" (UID: "bac06af4-bbe1-482a-8815-14a9cf2a1699"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.577903 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.578543 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-central-agent" containerID="cri-o://100c610b01cd8cbae563a7661d097874303002f0d958013b0241d5cf74e9cfd2" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.578898 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="proxy-httpd" containerID="cri-o://2eb951aa3c283ce1fd52ad843a7234b8357e48ad0ed3f9fa1b30578a1af9fa7a" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.579025 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="sg-core" containerID="cri-o://66d5e1af763ac9fbd2d79ed29f67762e31a80531d747a095842ae46df8e3741e" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.579078 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-notification-agent" containerID="cri-o://243efd87260faad6273959bee1e446c951a950b25ee3ad8a4d986ebc6dff73bb" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.611731 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.611982 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerName="kube-state-metrics" containerID="cri-o://995a1412416d800250460929b1d713faf90c65aaefa1d997100a9293ee18ff38" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.643116 4909 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.664992 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.665543 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerName="memcached" containerID="cri-o://9c0ed5eb3169a895fddc352403d70cbfcb4b590d019f84677427fdac6ec6cf71" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.728640 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-xhz8l"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.749487 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-xhz8l"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.780206 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6w86s"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.788340 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6w86s"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.792119 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8676ff5994-wjk95"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.792338 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-8676ff5994-wjk95" podUID="19094b17-f379-494e-b377-8191ddab4924" containerName="keystone-api" containerID="cri-o://c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca" gracePeriod=30
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.796960 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.797477 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data" (OuterVolumeSpecName: "config-data") pod "bac06af4-bbe1-482a-8815-14a9cf2a1699" (UID: "bac06af4-bbe1-482a-8815-14a9cf2a1699"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.802511 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.832736 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-vlgkn"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.847237 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-vlgkn"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.848266 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.853131 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.853166 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.853178 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.866737 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-937d-account-create-update-bmpjc"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.875997 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-937d-account-create-update-bmpjc"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.891713 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.159:8776/healthcheck\": read tcp 10.217.0.2:44786->10.217.0.159:8776: read: connection reset by peer"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.892291 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "bac06af4-bbe1-482a-8815-14a9cf2a1699" (UID: "bac06af4-bbe1-482a-8815-14a9cf2a1699"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.893021 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data" (OuterVolumeSpecName: "config-data") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.898916 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e792abf7-967c-4293-b5f4-f073b07c8cf1" (UID: "e792abf7-967c-4293-b5f4-f073b07c8cf1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.930471 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469389c1-482e-4968-b969-163e760e87f2" path="/var/lib/kubelet/pods/469389c1-482e-4968-b969-163e760e87f2/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.931229 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="970bbe40-62b2-4c32-8f8f-6b36abe92607" path="/var/lib/kubelet/pods/970bbe40-62b2-4c32-8f8f-6b36abe92607/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.932844 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" path="/var/lib/kubelet/pods/9a3a5941-5c86-4a65-be1e-26327ca990ad/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.935413 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a09f19f7-b777-4314-9b4e-acdac8cf783a" path="/var/lib/kubelet/pods/a09f19f7-b777-4314-9b4e-acdac8cf783a/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.936156 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b561f9ee-f192-4215-a832-f60fd675206e" path="/var/lib/kubelet/pods/b561f9ee-f192-4215-a832-f60fd675206e/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.983185 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.983258 4909 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bac06af4-bbe1-482a-8815-14a9cf2a1699-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.983271 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e792abf7-967c-4293-b5f4-f073b07c8cf1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.983572 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" path="/var/lib/kubelet/pods/f69e804c-fdc4-4b8f-86f3-d497612f42b8/volumes"
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.985183 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.985222 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-shkqq"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.985239 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-shkqq"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.985269 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f14d-account-create-update-pr2mt"]
Nov 28 16:36:09 crc kubenswrapper[4909]: I1128 16:36:09.992087 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f14d-account-create-update-pr2mt"]
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.020467 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-pd67x"]
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.030220 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerStarted","Data":"630e4eeb18086e03661fc2005462721172f42178abd3a1806a71653ed20f565d"}
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.030872 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-pd67x"]
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.046031 4909 generic.go:334] "Generic (PLEG): container finished" podID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerID="0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" exitCode=0
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.046133 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2ea37c8-3213-4043-9da2-a9e76f9284e4","Type":"ContainerDied","Data":"0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4"}
Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.093892 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.094156 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-3326-account-create-update-qqqzv"]
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.094207 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement3326-account-delete-6mrc7" event={"ID":"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f","Type":"ContainerStarted","Data":"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223"}
Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.100178 4909 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed."
pod="openstack/placement3326-account-delete-6mrc7" secret="" err="secret \"galera-openstack-dockercfg-n7tgg\" not found" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.111900 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-7bcd585886-f6h7k" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-log" probeResult="failure" output="Get \"https://10.217.0.148:8778/\": dial tcp 10.217.0.148:8778: connect: connection refused" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.111961 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-7bcd585886-f6h7k" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-api" probeResult="failure" output="Get \"https://10.217.0.148:8778/\": dial tcp 10.217.0.148:8778: connect: connection refused" Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.128944 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.129880 4909 generic.go:334] "Generic (PLEG): container finished" podID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerID="995a1412416d800250460929b1d713faf90c65aaefa1d997100a9293ee18ff38" exitCode=2 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.130083 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61","Type":"ContainerDied","Data":"995a1412416d800250460929b1d713faf90c65aaefa1d997100a9293ee18ff38"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.132733 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-3326-account-create-update-qqqzv"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.171218 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.171288 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e8b95945-6169-4e44-861a-f4abd48a7161" containerName="nova-scheduler-scheduler" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.177366 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement3326-account-delete-6mrc7"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.177418 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9","Type":"ContainerDied","Data":"1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.177444 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1994f966c75fb3df089f85d98060b5bd15e6a05e4d3800bd3ee8d2efc5482cce" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.184886 4909 generic.go:334] "Generic (PLEG): container 
finished" podID="44195e2b-7f1d-4542-8948-93a818071fd2" containerID="66d5e1af763ac9fbd2d79ed29f67762e31a80531d747a095842ae46df8e3741e" exitCode=2 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.184960 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerDied","Data":"66d5e1af763ac9fbd2d79ed29f67762e31a80531d747a095842ae46df8e3741e"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.189019 4909 generic.go:334] "Generic (PLEG): container finished" podID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerID="498c5080ce90e9a7105e201c315e2156da6b516e7e542f7334041b20bfa59f28" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.189091 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerDied","Data":"498c5080ce90e9a7105e201c315e2156da6b516e7e542f7334041b20bfa59f28"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.191487 4909 generic.go:334] "Generic (PLEG): container finished" podID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerID="c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938" exitCode=143 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.191581 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerDied","Data":"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938"} Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.195765 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.195839 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:10.695818832 +0000 UTC m=+1553.092503356 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.196123 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-fhbph"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.201239 4909 generic.go:334] "Generic (PLEG): container finished" podID="b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" containerID="0ffd2c461bc2721d0397f08b5b47716138419e6b87145bbcd26504ee193d270a" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.201317 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance2631-account-delete-2c2fs" event={"ID":"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb","Type":"ContainerDied","Data":"0ffd2c461bc2721d0397f08b5b47716138419e6b87145bbcd26504ee193d270a"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.205706 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-fhbph"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.206248 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c95ffb47-q5ls2" event={"ID":"175903ef-59e0-4c1f-820f-bd3d2692462d","Type":"ContainerDied","Data":"7e5e798026bdcb44572f2fef1737c88674e5cd832b7e7ac9747d50d221925db0"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.206277 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e5e798026bdcb44572f2fef1737c88674e5cd832b7e7ac9747d50d221925db0" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.207505 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b472e4d-2724-4ea4-93c9-5552d92af793" containerID="ef94b6682122c37e874d2c9670f4f10b050ca9f317d02f480fb72e2a87ccec9c" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.207569 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron8770-account-delete-9m56t" event={"ID":"5b472e4d-2724-4ea4-93c9-5552d92af793","Type":"ContainerDied","Data":"ef94b6682122c37e874d2c9670f4f10b050ca9f317d02f480fb72e2a87ccec9c"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.209009 4909 generic.go:334] "Generic (PLEG): container finished" podID="99398e49-db85-4878-b759-367747402c8b" containerID="a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac" exitCode=143 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.209046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerDied","Data":"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.213221 4909 generic.go:334] "Generic (PLEG): container finished" podID="d7651107-0120-4611-87d0-be009f3749d7" containerID="b1b648d707bec46e03074f6ddbe73bc4787a1ff840f797f11edd6e2f52984f64" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.213286 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerDied","Data":"b1b648d707bec46e03074f6ddbe73bc4787a1ff840f797f11edd6e2f52984f64"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.217881 4909 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/openstack-galera-0" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="galera" containerID="cri-o://219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" gracePeriod=30 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.218011 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican9799-account-delete-z2p59" event={"ID":"8fa2662b-eed3-4461-ba5d-d4554ca4a22b","Type":"ContainerStarted","Data":"94bf77be6b236b8999a881b64809cc5d3228f9ba7c55e297eb5bb70916649ff8"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.222600 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron8770-account-delete-9m56t"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.233278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapiab11-account-delete-2d7kv" event={"ID":"38f993aa-ed40-45f0-821f-e5d7f482ec99","Type":"ContainerStarted","Data":"b62786e4643f30fc830419e2c3f64fafab8b9e4690f1a7ab2286171bc0dd5d7a"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.236960 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder937d-account-delete-4rdjd" event={"ID":"c2cc9842-6f8f-4afc-9895-2b7a75a9696c","Type":"ContainerStarted","Data":"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.237496 4909 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/cinder937d-account-delete-4rdjd" secret="" err="secret \"galera-openstack-dockercfg-n7tgg\" not found" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.265008 4909 generic.go:334] "Generic (PLEG): container finished" podID="00913f80-f496-44ec-a619-99129724cb89" containerID="30fbad84f804b7ada9da16d8ee037dd6c5bb06b55551d23a9f96ea3c5222b69f" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.265124 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerDied","Data":"30fbad84f804b7ada9da16d8ee037dd6c5bb06b55551d23a9f96ea3c5222b69f"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.269747 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8770-account-create-update-tphgw"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.269786 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell077a6-account-delete-dqcds" event={"ID":"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4","Type":"ContainerStarted","Data":"308974de15c3aa0ad5f3404fa69c6c81d13d1b6401da416491cfece3da4efade"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.286523 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8770-account-create-update-tphgw"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.292947 4909 generic.go:334] "Generic (PLEG): container finished" podID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerID="06ec10e870b78a9508fdc0f9af0d0769bace54567bdcb85ffc77bf9a218d7d6e" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.293017 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerDied","Data":"06ec10e870b78a9508fdc0f9af0d0769bace54567bdcb85ffc77bf9a218d7d6e"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.296001 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/barbican-db-create-lqx45"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.316642 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-lqx45"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.322281 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-9799-account-create-update-w49wd"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.324873 4909 generic.go:334] "Generic (PLEG): container finished" podID="287e7e9a-0240-478e-a15b-b01122e79c32" containerID="29762398aa81300aa7e6fa97b5acccc7e5d16e4234ca8d5ea87d42654450084b" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.324912 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerDied","Data":"29762398aa81300aa7e6fa97b5acccc7e5d16e4234ca8d5ea87d42654450084b"} Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.332918 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican9799-account-delete-z2p59"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.337410 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-9799-account-create-update-w49wd"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.342389 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement3326-account-delete-6mrc7" podStartSLOduration=6.342364498 podStartE2EDuration="6.342364498s" podCreationTimestamp="2025-11-28 16:36:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:10.108934543 +0000 UTC m=+1552.505619067" watchObservedRunningTime="2025-11-28 16:36:10.342364498 +0000 UTC m=+1552.739049022" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.366312 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-5hd6v"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.375703 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-5hd6v"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.377358 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder937d-account-delete-4rdjd" podStartSLOduration=6.377339519 podStartE2EDuration="6.377339519s" podCreationTimestamp="2025-11-28 16:36:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:10.257198715 +0000 UTC m=+1552.653883239" watchObservedRunningTime="2025-11-28 16:36:10.377339519 +0000 UTC m=+1552.774024043" Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.400481 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.400640 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:10.900625166 +0000 UTC m=+1553.297309690 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.401064 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.417244 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-ab11-account-create-update-shpl9"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.432210 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-ab11-account-create-update-shpl9"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.466453 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-chks5"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.505818 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.505946 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data podName:02c83d05-a6ce-4c22-9015-91c0a766a518 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:14.505924712 +0000 UTC m=+1556.902609236 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data") pod "rabbitmq-cell1-server-0" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.518561 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-chks5"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.527861 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-77a6-account-create-update-pq4lq"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.529338 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.531333 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.532363 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.532395 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot 
register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="galera" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.535736 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-77a6-account-create-update-pq4lq"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.544197 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.679818 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.694266 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f6ddfdd4b-szlst" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": dial tcp 10.217.0.156:9311: connect: connection refused" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.694474 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f6ddfdd4b-szlst" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": dial tcp 10.217.0.156:9311: connect: connection refused" Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.712754 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.712818 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:11.712802842 +0000 UTC m=+1554.109487366 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.813671 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpgq7\" (UniqueName: \"kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.813730 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.813762 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.813935 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.813965 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.814010 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.814036 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.814059 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config\") pod \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\" (UID: \"2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9\") " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.814854 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.815214 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.815563 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.815772 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.816286 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.816307 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.816562 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.816574 4909 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.828028 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7" (OuterVolumeSpecName: "kube-api-access-zpgq7") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "kube-api-access-zpgq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.836288 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "mysql-db") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.846006 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.882585 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" (UID: "2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.917784 4909 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.917822 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.917833 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpgq7\" (UniqueName: \"kubernetes.io/projected/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-kube-api-access-zpgq7\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.917843 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.918281 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: E1128 16:36:10.918352 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:11.918335155 +0000 UTC m=+1554.315019679 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.958013 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.966106 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-589b6f8979-wbls8"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.972064 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.975892 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.983985 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:10 crc kubenswrapper[4909]: I1128 16:36:10.996259 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.006054 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.020122 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.039428 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.051246 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.095497 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.103810 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.107307 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119629 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119747 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4sst\" (UniqueName: \"kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119775 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvdkl\" (UniqueName: \"kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl\") pod \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119811 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom\") pod \"175903ef-59e0-4c1f-820f-bd3d2692462d\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119845 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119866 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119886 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbwjk\" (UniqueName: \"kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119915 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4qnd\" (UniqueName: \"kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd\") pod \"175903ef-59e0-4c1f-820f-bd3d2692462d\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119941 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data\") pod \"175903ef-59e0-4c1f-820f-bd3d2692462d\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119968 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data\") pod 
\"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.119997 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120046 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle\") pod \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120082 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hj7x\" (UniqueName: \"kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120108 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120130 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120154 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120225 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data\") pod \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120266 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgt79\" (UniqueName: \"kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79\") pod \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120291 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle\") pod \"175903ef-59e0-4c1f-820f-bd3d2692462d\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120327 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: 
\"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120362 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs\") pod \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120404 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120430 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120454 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data\") pod \"d7651107-0120-4611-87d0-be009f3749d7\" (UID: \"d7651107-0120-4611-87d0-be009f3749d7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120479 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120517 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120555 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs\") pod \"175903ef-59e0-4c1f-820f-bd3d2692462d\" (UID: \"175903ef-59e0-4c1f-820f-bd3d2692462d\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120592 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120617 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config\") pod \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\" (UID: \"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120710 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle\") pod 
\"b2ea37c8-3213-4043-9da2-a9e76f9284e4\" (UID: \"b2ea37c8-3213-4043-9da2-a9e76f9284e4\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.120750 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.121136 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs\") pod \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\" (UID: \"b97782ba-8bf0-4da9-bd81-97e88b4e73e7\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.121170 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle\") pod \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\" (UID: \"7b7568e8-e3d4-4e06-a25f-33656bdf089f\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.121383 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.121808 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.121834 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.131585 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.132454 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs" (OuterVolumeSpecName: "logs") pod "175903ef-59e0-4c1f-820f-bd3d2692462d" (UID: "175903ef-59e0-4c1f-820f-bd3d2692462d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.132877 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs" (OuterVolumeSpecName: "logs") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.133309 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740 is running failed: container process not found" containerID="1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.138181 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs" (OuterVolumeSpecName: "logs") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.138505 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740 is running failed: container process not found" containerID="1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.139457 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740 is running failed: container process not found" containerID="1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.139555 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerName="nova-cell0-conductor-conductor" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.148328 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd" (OuterVolumeSpecName: "kube-api-access-f4qnd") pod "175903ef-59e0-4c1f-820f-bd3d2692462d" (UID: "175903ef-59e0-4c1f-820f-bd3d2692462d"). InnerVolumeSpecName "kube-api-access-f4qnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.148450 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.161765 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "175903ef-59e0-4c1f-820f-bd3d2692462d" (UID: "175903ef-59e0-4c1f-820f-bd3d2692462d"). 
InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.162249 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts" (OuterVolumeSpecName: "scripts") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.162356 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x" (OuterVolumeSpecName: "kube-api-access-4hj7x") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "kube-api-access-4hj7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.166634 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79" (OuterVolumeSpecName: "kube-api-access-lgt79") pod "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" (UID: "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61"). InnerVolumeSpecName "kube-api-access-lgt79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.177001 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.177799 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts" (OuterVolumeSpecName: "scripts") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.177826 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk" (OuterVolumeSpecName: "kube-api-access-dbwjk") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "kube-api-access-dbwjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.184723 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts" (OuterVolumeSpecName: "scripts") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.184732 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl" (OuterVolumeSpecName: "kube-api-access-pvdkl") pod "b2ea37c8-3213-4043-9da2-a9e76f9284e4" (UID: "b2ea37c8-3213-4043-9da2-a9e76f9284e4"). 
InnerVolumeSpecName "kube-api-access-pvdkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.187173 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst" (OuterVolumeSpecName: "kube-api-access-x4sst") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "kube-api-access-x4sst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.222468 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.222918 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.222957 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223030 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223052 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n42nt\" (UniqueName: \"kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223093 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223130 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223205 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223249 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z52sq\" (UniqueName: 
\"kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223293 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223379 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223425 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223449 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle\") pod \"287e7e9a-0240-478e-a15b-b01122e79c32\" (UID: \"287e7e9a-0240-478e-a15b-b01122e79c32\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223476 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223587 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223618 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.223670 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle\") pod \"00913f80-f496-44ec-a619-99129724cb89\" (UID: \"00913f80-f496-44ec-a619-99129724cb89\") " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224243 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224267 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7651107-0120-4611-87d0-be009f3749d7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: 
I1128 16:36:11.224303 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbwjk\" (UniqueName: \"kubernetes.io/projected/d7651107-0120-4611-87d0-be009f3749d7-kube-api-access-dbwjk\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224314 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4qnd\" (UniqueName: \"kubernetes.io/projected/175903ef-59e0-4c1f-820f-bd3d2692462d-kube-api-access-f4qnd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224325 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hj7x\" (UniqueName: \"kubernetes.io/projected/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-kube-api-access-4hj7x\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224345 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224354 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224384 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgt79\" (UniqueName: \"kubernetes.io/projected/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-api-access-lgt79\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224393 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224401 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224409 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/175903ef-59e0-4c1f-820f-bd3d2692462d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224417 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7b7568e8-e3d4-4e06-a25f-33656bdf089f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224425 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224432 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224440 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4sst\" (UniqueName: \"kubernetes.io/projected/7b7568e8-e3d4-4e06-a25f-33656bdf089f-kube-api-access-x4sst\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.224448 4909 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-pvdkl\" (UniqueName: \"kubernetes.io/projected/b2ea37c8-3213-4043-9da2-a9e76f9284e4-kube-api-access-pvdkl\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.225188 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs" (OuterVolumeSpecName: "logs") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.234005 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq" (OuterVolumeSpecName: "kube-api-access-z52sq") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "kube-api-access-z52sq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.234109 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs" (OuterVolumeSpecName: "logs") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.234156 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.234368 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.247460 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.285550 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt" (OuterVolumeSpecName: "kube-api-access-n42nt") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "kube-api-access-n42nt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.308024 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts" (OuterVolumeSpecName: "scripts") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.308037 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts" (OuterVolumeSpecName: "scripts") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.315977 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.326949 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/287e7e9a-0240-478e-a15b-b01122e79c32-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327144 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n42nt\" (UniqueName: \"kubernetes.io/projected/287e7e9a-0240-478e-a15b-b01122e79c32-kube-api-access-n42nt\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327236 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327335 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327411 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z52sq\" (UniqueName: \"kubernetes.io/projected/00913f80-f496-44ec-a619-99129724cb89-kube-api-access-z52sq\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327481 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327553 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/287e7e9a-0240-478e-a15b-b01122e79c32-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327623 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00913f80-f496-44ec-a619-99129724cb89-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327712 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.327824 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc 
kubenswrapper[4909]: I1128 16:36:11.342868 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"00913f80-f496-44ec-a619-99129724cb89","Type":"ContainerDied","Data":"68a0915ee2dd3acb65a87dd91cc9a841f68c3241c8362deb5a14bb722ea6a2b9"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.342929 4909 scope.go:117] "RemoveContainer" containerID="30fbad84f804b7ada9da16d8ee037dd6c5bb06b55551d23a9f96ea3c5222b69f" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.343102 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.361308 4909 generic.go:334] "Generic (PLEG): container finished" podID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerID="edce0e3a1b79a461c6e384cfefd0cbf3c0e7f50280e3c51aafc961a31f14493c" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.361583 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerDied","Data":"edce0e3a1b79a461c6e384cfefd0cbf3c0e7f50280e3c51aafc961a31f14493c"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.366350 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"64b9a5c2-09a8-48fb-9e1b-b66c1003cf61","Type":"ContainerDied","Data":"6a5f3255286ce870c817ddb1b48a301fe20009afca6ef58437cd668e092f9b4e"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.367140 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.385461 4909 generic.go:334] "Generic (PLEG): container finished" podID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerID="9c0ed5eb3169a895fddc352403d70cbfcb4b590d019f84677427fdac6ec6cf71" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.385865 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72f0e500-fe06-4373-9bc3-6cdaa2520043","Type":"ContainerDied","Data":"9c0ed5eb3169a895fddc352403d70cbfcb4b590d019f84677427fdac6ec6cf71"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.390474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican9799-account-delete-z2p59" event={"ID":"8fa2662b-eed3-4461-ba5d-d4554ca4a22b","Type":"ContainerStarted","Data":"dddd51e21ee1da11fcdbb2f80b518588c94f66fdf38cd76c69d36bbd73a25213"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.390716 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican9799-account-delete-z2p59" podUID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" containerName="mariadb-account-delete" containerID="cri-o://dddd51e21ee1da11fcdbb2f80b518588c94f66fdf38cd76c69d36bbd73a25213" gracePeriod=30 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.398174 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapiab11-account-delete-2d7kv" event={"ID":"38f993aa-ed40-45f0-821f-e5d7f482ec99","Type":"ContainerStarted","Data":"c85df760aafac3a038f71027a3c1a79c7e72fbde573d1a5e4dfa79b8700bbd6d"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.399569 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapiab11-account-delete-2d7kv" podUID="38f993aa-ed40-45f0-821f-e5d7f482ec99" containerName="mariadb-account-delete" 
containerID="cri-o://c85df760aafac3a038f71027a3c1a79c7e72fbde573d1a5e4dfa79b8700bbd6d" gracePeriod=30 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.420620 4909 generic.go:334] "Generic (PLEG): container finished" podID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerID="61af0c4690ad10f578c11a1874dcaa8f66ff04b25238b214fda8321e55f07b14" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.420703 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerDied","Data":"61af0c4690ad10f578c11a1874dcaa8f66ff04b25238b214fda8321e55f07b14"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.451681 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.451698 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7b7568e8-e3d4-4e06-a25f-33656bdf089f","Type":"ContainerDied","Data":"bd4409c68bbe1e59ec8c444df21ccbe33a05a6b3a3351c40e67fed585b228d63"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.455899 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican9799-account-delete-z2p59" podStartSLOduration=6.455878417 podStartE2EDuration="6.455878417s" podCreationTimestamp="2025-11-28 16:36:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:11.406514588 +0000 UTC m=+1553.803199122" watchObservedRunningTime="2025-11-28 16:36:11.455878417 +0000 UTC m=+1553.852562951" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.458270 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8b95945-6169-4e44-861a-f4abd48a7161" containerID="bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.458363 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8b95945-6169-4e44-861a-f4abd48a7161","Type":"ContainerDied","Data":"bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.465477 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bcd585886-f6h7k" event={"ID":"d7651107-0120-4611-87d0-be009f3749d7","Type":"ContainerDied","Data":"4ad31d32da06cf49ec254691efea35419db7b0910b39f95e73b313cb0412df86"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.465690 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapiab11-account-delete-2d7kv" podStartSLOduration=6.465672021 podStartE2EDuration="6.465672021s" podCreationTimestamp="2025-11-28 16:36:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:11.42923413 +0000 UTC m=+1553.825918654" watchObservedRunningTime="2025-11-28 16:36:11.465672021 +0000 UTC m=+1553.862356545" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.465777 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7bcd585886-f6h7k" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.473571 4909 generic.go:334] "Generic (PLEG): container finished" podID="44195e2b-7f1d-4542-8948-93a818071fd2" containerID="2eb951aa3c283ce1fd52ad843a7234b8357e48ad0ed3f9fa1b30578a1af9fa7a" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.473808 4909 generic.go:334] "Generic (PLEG): container finished" podID="44195e2b-7f1d-4542-8948-93a818071fd2" containerID="100c610b01cd8cbae563a7661d097874303002f0d958013b0241d5cf74e9cfd2" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.473605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerDied","Data":"2eb951aa3c283ce1fd52ad843a7234b8357e48ad0ed3f9fa1b30578a1af9fa7a"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.473889 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerDied","Data":"100c610b01cd8cbae563a7661d097874303002f0d958013b0241d5cf74e9cfd2"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.478085 4909 generic.go:334] "Generic (PLEG): container finished" podID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerID="1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.478164 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c64b6821-6f46-4764-be55-97ed8c71fefa","Type":"ContainerDied","Data":"1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.482691 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2ea37c8-3213-4043-9da2-a9e76f9284e4","Type":"ContainerDied","Data":"78ad3448d2834c66fa31516dc972b1d20a281375bf506a32666ec4282e9e84f6"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.483175 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.489343 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerID="34cdf5b11d6117bafb37ddcc3824f5ce702d1b9711769b9e114b66075bba4f47" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.489453 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerDied","Data":"34cdf5b11d6117bafb37ddcc3824f5ce702d1b9711769b9e114b66075bba4f47"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.492828 4909 generic.go:334] "Generic (PLEG): container finished" podID="ffd60458-19af-464b-9649-57d25893f22a" containerID="3cd8ac1736c6fbc1977f593e8c58c7c95ab9e0dac8a3505b8acefff70b5cfba5" exitCode=0 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.492935 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerDied","Data":"3cd8ac1736c6fbc1977f593e8c58c7c95ab9e0dac8a3505b8acefff70b5cfba5"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.495948 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b97782ba-8bf0-4da9-bd81-97e88b4e73e7","Type":"ContainerDied","Data":"757942926ce5d1f49f6df9094a2b9c8153258d63791c59f907eb87a53c3b24b2"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.496134 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.502858 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6c95ffb47-q5ls2" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.504851 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.505097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"287e7e9a-0240-478e-a15b-b01122e79c32","Type":"ContainerDied","Data":"107a3bcbe7139b74f69622a738afc6a71e6d7f6d4189f9d3eb0043869232e2a0"} Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.505392 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder937d-account-delete-4rdjd" podUID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" containerName="mariadb-account-delete" containerID="cri-o://09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f" gracePeriod=30 Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.506088 4909 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/placement3326-account-delete-6mrc7" secret="" err="secret \"galera-openstack-dockercfg-n7tgg\" not found" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.506633 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.686055 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.703345 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" (UID: "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.752779 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.752808 4909 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.752807 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" (UID: "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.752867 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.752926 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:13.752910005 +0000 UTC m=+1556.149594529 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.779601 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "175903ef-59e0-4c1f-820f-bd3d2692462d" (UID: "175903ef-59e0-4c1f-820f-bd3d2692462d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.839308 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.852031 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data" (OuterVolumeSpecName: "config-data") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.854993 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.855021 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.855035 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.855050 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.869242 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data" (OuterVolumeSpecName: "config-data") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.884963 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" (UID: "64b9a5c2-09a8-48fb-9e1b-b66c1003cf61"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.903105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2ea37c8-3213-4043-9da2-a9e76f9284e4" (UID: "b2ea37c8-3213-4043-9da2-a9e76f9284e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.920886 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.920967 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.955955 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22576912-638d-46ee-ad3e-9d78debb719e" path="/var/lib/kubelet/pods/22576912-638d-46ee-ad3e-9d78debb719e/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.956453 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.957026 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4740f8ad-ec12-40b8-b3eb-f22149cbd07a" path="/var/lib/kubelet/pods/4740f8ad-ec12-40b8-b3eb-f22149cbd07a/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.957896 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.957979 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.958001 4909 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: E1128 16:36:11.958072 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:13.958049978 +0000 UTC m=+1556.354734572 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.958096 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.958112 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.959510 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59f1690b-72e4-431e-b207-77553fd936f2" path="/var/lib/kubelet/pods/59f1690b-72e4-431e-b207-77553fd936f2/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.967346 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="650d0ac8-9d90-4bca-9d00-830345bf8e65" path="/var/lib/kubelet/pods/650d0ac8-9d90-4bca-9d00-830345bf8e65/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.974118 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.983848 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7648e31f-ca65-4302-903a-abf1fe2aa860" path="/var/lib/kubelet/pods/7648e31f-ca65-4302-903a-abf1fe2aa860/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.986579 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79e6b32d-eeda-4bfa-8872-e2581a703aa5" path="/var/lib/kubelet/pods/79e6b32d-eeda-4bfa-8872-e2581a703aa5/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.987128 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9384b65d-c575-4479-99be-eb182b848bb5" path="/var/lib/kubelet/pods/9384b65d-c575-4479-99be-eb182b848bb5/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.987702 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98978b85-3c69-4ae1-83cb-73f72e8d2093" path="/var/lib/kubelet/pods/98978b85-3c69-4ae1-83cb-73f72e8d2093/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.988244 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4cdba92-a257-403a-a47f-8678e1e63b84" path="/var/lib/kubelet/pods/a4cdba92-a257-403a-a47f-8678e1e63b84/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.989226 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bac06af4-bbe1-482a-8815-14a9cf2a1699" path="/var/lib/kubelet/pods/bac06af4-bbe1-482a-8815-14a9cf2a1699/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.989741 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be57306e-a9e9-45f4-8660-f5e1515799f8" path="/var/lib/kubelet/pods/be57306e-a9e9-45f4-8660-f5e1515799f8/volumes" Nov 28 16:36:11 crc 
kubenswrapper[4909]: I1128 16:36:11.990208 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be6ec85b-3437-458a-9e53-6464138dbcea" path="/var/lib/kubelet/pods/be6ec85b-3437-458a-9e53-6464138dbcea/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.993198 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" path="/var/lib/kubelet/pods/e792abf7-967c-4293-b5f4-f073b07c8cf1/volumes" Nov 28 16:36:11 crc kubenswrapper[4909]: I1128 16:36:11.993778 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f41afab6-2271-4347-97fd-15d60c351409" path="/var/lib/kubelet/pods/f41afab6-2271-4347-97fd-15d60c351409/volumes" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.031338 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b97782ba-8bf0-4da9-bd81-97e88b4e73e7" (UID: "b97782ba-8bf0-4da9-bd81-97e88b4e73e7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.066564 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.066607 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b97782ba-8bf0-4da9-bd81-97e88b4e73e7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.161916 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.167983 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.201019 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data" (OuterVolumeSpecName: "config-data") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.206014 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data" (OuterVolumeSpecName: "config-data") pod "b2ea37c8-3213-4043-9da2-a9e76f9284e4" (UID: "b2ea37c8-3213-4043-9da2-a9e76f9284e4"). InnerVolumeSpecName "config-data". 
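
The "Cleaned up orphaned pod volumes dir" entries above are the kubelet's orphan sweep: directories under the pods root are named by pod UID, and any UID no longer known to the kubelet (with no volumes still mounted) has its directory removed. A minimal Go sketch under those assumptions; the root path and active set below are illustrative:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanupOrphans removes per-pod directories whose UID is not in
    // the active set -- the sweep behind the "Cleaned up orphaned pod
    // volumes dir" lines. Real kubelet also verifies no mounts remain
    // before deleting; this sketch skips that check.
    func cleanupOrphans(root string, active map[string]bool) error {
        entries, err := os.ReadDir(root)
        if err != nil {
            return err
        }
        for _, e := range entries {
            if e.IsDir() && !active[e.Name()] {
                dir := filepath.Join(root, e.Name())
                fmt.Println("removing orphaned pod dir", dir)
                if err := os.RemoveAll(dir); err != nil {
                    return err
                }
            }
        }
        return nil
    }

    func main() {
        _ = cleanupOrphans("/tmp/example-pods", map[string]bool{"still-running-uid": true})
    }
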
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.211281 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "287e7e9a-0240-478e-a15b-b01122e79c32" (UID: "287e7e9a-0240-478e-a15b-b01122e79c32"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.233723 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data" (OuterVolumeSpecName: "config-data") pod "175903ef-59e0-4c1f-820f-bd3d2692462d" (UID: "175903ef-59e0-4c1f-820f-bd3d2692462d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.271991 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data" (OuterVolumeSpecName: "config-data") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272045 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272511 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272525 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ea37c8-3213-4043-9da2-a9e76f9284e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272534 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/287e7e9a-0240-478e-a15b-b01122e79c32-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272543 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272552 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.272561 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175903ef-59e0-4c1f-820f-bd3d2692462d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.286045 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data" (OuterVolumeSpecName: "config-data") pod "7b7568e8-e3d4-4e06-a25f-33656bdf089f" (UID: "7b7568e8-e3d4-4e06-a25f-33656bdf089f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.294253 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.314769 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "00913f80-f496-44ec-a619-99129724cb89" (UID: "00913f80-f496-44ec-a619-99129724cb89"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.317885 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.332256 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d7651107-0120-4611-87d0-be009f3749d7" (UID: "d7651107-0120-4611-87d0-be009f3749d7"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.373818 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b7568e8-e3d4-4e06-a25f-33656bdf089f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.373855 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.373865 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.373874 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/00913f80-f496-44ec-a619-99129724cb89-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.373882 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7651107-0120-4611-87d0-be009f3749d7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.373947 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.373999 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data podName:7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:20.373981347 +0000 UTC m=+1562.770665871 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data") pod "rabbitmq-server-0" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444") : configmap "rabbitmq-config-data" not found Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.396428 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.475192 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle\") pod \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.475258 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs\") pod \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.475337 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data\") pod \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.475393 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs\") pod \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.475490 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbp46\" (UniqueName: \"kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46\") pod \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\" (UID: \"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.476360 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs" (OuterVolumeSpecName: "logs") pod "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" (UID: "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.482087 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46" (OuterVolumeSpecName: "kube-api-access-bbp46") pod "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" (UID: "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6"). InnerVolumeSpecName "kube-api-access-bbp46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.508530 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" (UID: "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.509018 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data" (OuterVolumeSpecName: "config-data") pod "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" (UID: "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.514597 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1c81b153-5498-4d63-9c98-fa8b79d5acdd","Type":"ContainerDied","Data":"3300409b6cb731b3216e287bc8588cf577b91e17d54c3318d7cace8d3b1223ef"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.514635 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3300409b6cb731b3216e287bc8588cf577b91e17d54c3318d7cace8d3b1223ef" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.516879 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerStarted","Data":"15d949a1b99dcbb9b7b8db33a347fc722bb599a90632809376044acb153f117d"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.517150 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-65c6d9c7fd-627g9" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api-log" containerID="cri-o://630e4eeb18086e03661fc2005462721172f42178abd3a1806a71653ed20f565d" gracePeriod=30 Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.517303 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.517338 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.517444 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-65c6d9c7fd-627g9" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api" containerID="cri-o://15d949a1b99dcbb9b7b8db33a347fc722bb599a90632809376044acb153f117d" gracePeriod=30 Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.533853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72f0e500-fe06-4373-9bc3-6cdaa2520043","Type":"ContainerDied","Data":"8b9c4e7fc23f26be96a3901625cb22f61e05dc6d320f1ce226e9d753fd86554e"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.533886 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b9c4e7fc23f26be96a3901625cb22f61e05dc6d320f1ce226e9d753fd86554e" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.544089 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" event={"ID":"e086b29e-c7fb-45a4-a6f2-c30508f1b25a","Type":"ContainerDied","Data":"d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.544132 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d41b06983d45f5cd27ddd2af51a3f720a0ea4c59305d9052b2da5d7556796074" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.549081 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8b95945-6169-4e44-861a-f4abd48a7161","Type":"ContainerDied","Data":"a5be97e66ba6d28d38e21c6092e9c3c674ed31badc5bd35ffc952b4c55772330"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.550099 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5be97e66ba6d28d38e21c6092e9c3c674ed31badc5bd35ffc952b4c55772330" Nov 28 16:36:12 
crc kubenswrapper[4909]: I1128 16:36:12.555065 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-65c6d9c7fd-627g9" podStartSLOduration=8.555048262 podStartE2EDuration="8.555048262s" podCreationTimestamp="2025-11-28 16:36:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:12.536985536 +0000 UTC m=+1554.933670060" watchObservedRunningTime="2025-11-28 16:36:12.555048262 +0000 UTC m=+1554.951732786" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.558432 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e8837df0-c6fe-42a6-bf0f-8ca14f1961a6","Type":"ContainerDied","Data":"6a0fe3c93870048d7b5a5cbaa3d6a3d69d63bbddc0cc7161d897e534dd32e64d"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.558515 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.564443 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" (UID: "e8837df0-c6fe-42a6-bf0f-8ca14f1961a6"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.568342 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6ddfdd4b-szlst" event={"ID":"ffd60458-19af-464b-9649-57d25893f22a","Type":"ContainerDied","Data":"a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.568395 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a53d479313f266911a327abd159a691cbb4811bc83d8bb71b525714484c79937" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.570065 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell077a6-account-delete-dqcds" event={"ID":"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4","Type":"ContainerStarted","Data":"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.570853 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell077a6-account-delete-dqcds" podUID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" containerName="mariadb-account-delete" containerID="cri-o://a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c" gracePeriod=30 Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.575892 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance2631-account-delete-2c2fs" event={"ID":"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb","Type":"ContainerDied","Data":"e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.575929 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1ff692285d603972a12b07eb3c17c9ceeb9d4c28fdbf684db3c4dc63fd343fb" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.582354 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron8770-account-delete-9m56t" 
event={"ID":"5b472e4d-2724-4ea4-93c9-5552d92af793","Type":"ContainerDied","Data":"ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.582398 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccd58b768d86d30c56eae48a2272013e7ba47fc23d9455039a5fdca0131a2767" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.582933 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.582954 4909 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.584474 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbp46\" (UniqueName: \"kubernetes.io/projected/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-kube-api-access-bbp46\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.584497 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.584522 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.586044 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell077a6-account-delete-dqcds" podStartSLOduration=7.586029086 podStartE2EDuration="7.586029086s" podCreationTimestamp="2025-11-28 16:36:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:12.583498928 +0000 UTC m=+1554.980183472" watchObservedRunningTime="2025-11-28 16:36:12.586029086 +0000 UTC m=+1554.982713610" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.600010 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c64b6821-6f46-4764-be55-97ed8c71fefa","Type":"ContainerDied","Data":"f1a87ce8374db877ff4a4b3a66ae2f5efd1d08bd22aafe79ee5158a6aef305d0"} Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.600072 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1a87ce8374db877ff4a4b3a66ae2f5efd1d08bd22aafe79ee5158a6aef305d0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.600094 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement3326-account-delete-6mrc7" podUID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" containerName="mariadb-account-delete" containerID="cri-o://83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223" gracePeriod=30 Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.605389 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.616619 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.647295 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.647774 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.658394 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.669010 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.683018 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.685799 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686245 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686297 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data\") pod \"72f0e500-fe06-4373-9bc3-6cdaa2520043\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686340 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686364 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686384 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686415 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686448 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " 
Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686465 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686488 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle\") pod \"72f0e500-fe06-4373-9bc3-6cdaa2520043\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686526 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686558 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686579 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cd27\" (UniqueName: \"kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27\") pod \"72f0e500-fe06-4373-9bc3-6cdaa2520043\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686592 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686611 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs\") pod \"72f0e500-fe06-4373-9bc3-6cdaa2520043\" (UID: \"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686627 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbm7r\" (UniqueName: \"kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686648 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfzwk\" (UniqueName: \"kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686717 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config\") pod \"72f0e500-fe06-4373-9bc3-6cdaa2520043\" (UID: 
\"72f0e500-fe06-4373-9bc3-6cdaa2520043\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.686738 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs\") pod \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\" (UID: \"1c81b153-5498-4d63-9c98-fa8b79d5acdd\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.687974 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs" (OuterVolumeSpecName: "logs") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.688131 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs" (OuterVolumeSpecName: "logs") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.690631 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data" (OuterVolumeSpecName: "config-data") pod "72f0e500-fe06-4373-9bc3-6cdaa2520043" (UID: "72f0e500-fe06-4373-9bc3-6cdaa2520043"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.693966 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "72f0e500-fe06-4373-9bc3-6cdaa2520043" (UID: "72f0e500-fe06-4373-9bc3-6cdaa2520043"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.696700 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.702223 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.706375 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.712759 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk" (OuterVolumeSpecName: "kube-api-access-lfzwk") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "kube-api-access-lfzwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.721871 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.721914 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r" (OuterVolumeSpecName: "kube-api-access-mbm7r") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "kube-api-access-mbm7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.721888 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27" (OuterVolumeSpecName: "kube-api-access-5cd27") pod "72f0e500-fe06-4373-9bc3-6cdaa2520043" (UID: "72f0e500-fe06-4373-9bc3-6cdaa2520043"). InnerVolumeSpecName "kube-api-access-5cd27". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.722805 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.733367 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.741644 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.763429 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.773302 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.788431 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789198 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data\") pod \"e8b95945-6169-4e44-861a-f4abd48a7161\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789317 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle\") pod \"c64b6821-6f46-4764-be55-97ed8c71fefa\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789398 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts\") pod \"5b472e4d-2724-4ea4-93c9-5552d92af793\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789473 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts\") pod \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789573 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle\") pod \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789781 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data\") pod \"c64b6821-6f46-4764-be55-97ed8c71fefa\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789881 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgqlh\" (UniqueName: \"kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh\") pod \"5b472e4d-2724-4ea4-93c9-5552d92af793\" (UID: \"5b472e4d-2724-4ea4-93c9-5552d92af793\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.789987 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b472e4d-2724-4ea4-93c9-5552d92af793" (UID: "5b472e4d-2724-4ea4-93c9-5552d92af793"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.791902 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle\") pod \"e8b95945-6169-4e44-861a-f4abd48a7161\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.792049 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data\") pod \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.792123 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.792903 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" (UID: "b0f3cfb2-6884-4ef1-9844-cf494a2e21bb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.793286 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.793587 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzsgm\" (UniqueName: \"kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm\") pod \"c64b6821-6f46-4764-be55-97ed8c71fefa\" (UID: \"c64b6821-6f46-4764-be55-97ed8c71fefa\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.793847 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-424v7\" (UniqueName: \"kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7\") pod \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.793943 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs\") pod \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.794015 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpx9c\" (UniqueName: \"kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c\") pod \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\" (UID: \"b0f3cfb2-6884-4ef1-9844-cf494a2e21bb\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.794079 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom\") pod 
\"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\" (UID: \"e086b29e-c7fb-45a4-a6f2-c30508f1b25a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.794153 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqsll\" (UniqueName: \"kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll\") pod \"e8b95945-6169-4e44-861a-f4abd48a7161\" (UID: \"e8b95945-6169-4e44-861a-f4abd48a7161\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.794256 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") pod \"ffd60458-19af-464b-9649-57d25893f22a\" (UID: \"ffd60458-19af-464b-9649-57d25893f22a\") " Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795029 4909 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795100 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c81b153-5498-4d63-9c98-fa8b79d5acdd-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795156 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795225 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72f0e500-fe06-4373-9bc3-6cdaa2520043-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795276 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffd60458-19af-464b-9649-57d25893f22a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795323 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795414 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cd27\" (UniqueName: \"kubernetes.io/projected/72f0e500-fe06-4373-9bc3-6cdaa2520043-kube-api-access-5cd27\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795556 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795612 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbm7r\" (UniqueName: \"kubernetes.io/projected/ffd60458-19af-464b-9649-57d25893f22a-kube-api-access-mbm7r\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795686 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfzwk\" (UniqueName: \"kubernetes.io/projected/1c81b153-5498-4d63-9c98-fa8b79d5acdd-kube-api-access-lfzwk\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 
16:36:12.795740 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b472e4d-2724-4ea4-93c9-5552d92af793-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.795791 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: W1128 16:36:12.795963 4909 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/ffd60458-19af-464b-9649-57d25893f22a/volumes/kubernetes.io~secret/public-tls-certs Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.796051 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.797295 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh" (OuterVolumeSpecName: "kube-api-access-dgqlh") pod "5b472e4d-2724-4ea4-93c9-5552d92af793" (UID: "5b472e4d-2724-4ea4-93c9-5552d92af793"). InnerVolumeSpecName "kube-api-access-dgqlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.803127 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7" (OuterVolumeSpecName: "kube-api-access-424v7") pod "e086b29e-c7fb-45a4-a6f2-c30508f1b25a" (UID: "e086b29e-c7fb-45a4-a6f2-c30508f1b25a"). InnerVolumeSpecName "kube-api-access-424v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.809839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "72f0e500-fe06-4373-9bc3-6cdaa2520043" (UID: "72f0e500-fe06-4373-9bc3-6cdaa2520043"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.809925 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e086b29e-c7fb-45a4-a6f2-c30508f1b25a" (UID: "e086b29e-c7fb-45a4-a6f2-c30508f1b25a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.810005 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c" (OuterVolumeSpecName: "kube-api-access-lpx9c") pod "b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" (UID: "b0f3cfb2-6884-4ef1-9844-cf494a2e21bb"). InnerVolumeSpecName "kube-api-access-lpx9c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.810007 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.810423 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm" (OuterVolumeSpecName: "kube-api-access-hzsgm") pod "c64b6821-6f46-4764-be55-97ed8c71fefa" (UID: "c64b6821-6f46-4764-be55-97ed8c71fefa"). InnerVolumeSpecName "kube-api-access-hzsgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.811513 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.812866 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:12 crc kubenswrapper[4909]: E1128 16:36:12.812894 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.813857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll" (OuterVolumeSpecName: "kube-api-access-mqsll") pod "e8b95945-6169-4e44-861a-f4abd48a7161" (UID: "e8b95945-6169-4e44-861a-f4abd48a7161"). InnerVolumeSpecName "kube-api-access-mqsll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.821162 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.823639 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.827159 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.831803 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.837604 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.844234 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.848837 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data" (OuterVolumeSpecName: "config-data") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.852779 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-6c95ffb47-q5ls2"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.857461 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.863056 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.865083 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data" (OuterVolumeSpecName: "config-data") pod "e8b95945-6169-4e44-861a-f4abd48a7161" (UID: "e8b95945-6169-4e44-861a-f4abd48a7161"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.868938 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data" (OuterVolumeSpecName: "config-data") pod "c64b6821-6f46-4764-be55-97ed8c71fefa" (UID: "c64b6821-6f46-4764-be55-97ed8c71fefa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.872864 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.875175 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7bcd585886-f6h7k"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897330 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897357 4909 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897366 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897376 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897386 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgqlh\" (UniqueName: \"kubernetes.io/projected/5b472e4d-2724-4ea4-93c9-5552d92af793-kube-api-access-dgqlh\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897394 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897403 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzsgm\" (UniqueName: \"kubernetes.io/projected/c64b6821-6f46-4764-be55-97ed8c71fefa-kube-api-access-hzsgm\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897412 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-424v7\" (UniqueName: \"kubernetes.io/projected/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-kube-api-access-424v7\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897421 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpx9c\" (UniqueName: \"kubernetes.io/projected/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb-kube-api-access-lpx9c\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897429 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897438 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.897446 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqsll\" (UniqueName: 
\"kubernetes.io/projected/e8b95945-6169-4e44-861a-f4abd48a7161-kube-api-access-mqsll\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.902193 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c64b6821-6f46-4764-be55-97ed8c71fefa" (UID: "c64b6821-6f46-4764-be55-97ed8c71fefa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.928358 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8b95945-6169-4e44-861a-f4abd48a7161" (UID: "e8b95945-6169-4e44-861a-f4abd48a7161"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.940783 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.946708 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.949961 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data" (OuterVolumeSpecName: "config-data") pod "e086b29e-c7fb-45a4-a6f2-c30508f1b25a" (UID: "e086b29e-c7fb-45a4-a6f2-c30508f1b25a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.959536 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e086b29e-c7fb-45a4-a6f2-c30508f1b25a" (UID: "e086b29e-c7fb-45a4-a6f2-c30508f1b25a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.960965 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.974822 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72f0e500-fe06-4373-9bc3-6cdaa2520043" (UID: "72f0e500-fe06-4373-9bc3-6cdaa2520043"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:12 crc kubenswrapper[4909]: I1128 16:36:12.982804 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1c81b153-5498-4d63-9c98-fa8b79d5acdd" (UID: "1c81b153-5498-4d63-9c98-fa8b79d5acdd"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002913 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c81b153-5498-4d63-9c98-fa8b79d5acdd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002939 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c64b6821-6f46-4764-be55-97ed8c71fefa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002949 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002960 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8b95945-6169-4e44-861a-f4abd48a7161-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002972 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002985 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.002996 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72f0e500-fe06-4373-9bc3-6cdaa2520043-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.010595 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data" (OuterVolumeSpecName: "config-data") pod "ffd60458-19af-464b-9649-57d25893f22a" (UID: "ffd60458-19af-464b-9649-57d25893f22a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.070155 4909 scope.go:117] "RemoveContainer" containerID="257ba16eb5dc11579d07b9316fe274af6b54797db9a8db896742e423617ab540" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.091194 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs" (OuterVolumeSpecName: "logs") pod "e086b29e-c7fb-45a4-a6f2-c30508f1b25a" (UID: "e086b29e-c7fb-45a4-a6f2-c30508f1b25a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.103989 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e086b29e-c7fb-45a4-a6f2-c30508f1b25a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.104017 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffd60458-19af-464b-9649-57d25893f22a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.113943 4909 scope.go:117] "RemoveContainer" containerID="995a1412416d800250460929b1d713faf90c65aaefa1d997100a9293ee18ff38" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.196437 4909 scope.go:117] "RemoveContainer" containerID="6965ce3a18191ccba9ccd72339cd48bef0713e889950e45db1e00d6f157854c1" Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.221354 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.222047 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.222430 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.222476 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server" Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.224690 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.227486 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.229249 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.229289 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd" Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.288238 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19094b17_f379_494e_b377_8191ddab4924.slice/crio-c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.307145 4909 scope.go:117] "RemoveContainer" containerID="498c5080ce90e9a7105e201c315e2156da6b516e7e542f7334041b20bfa59f28" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.353279 4909 scope.go:117] "RemoveContainer" containerID="b1b648d707bec46e03074f6ddbe73bc4787a1ff840f797f11edd6e2f52984f64" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.386324 4909 scope.go:117] "RemoveContainer" containerID="f5457f347b25c89d470eee3116b8c1baa0d18385fa471e46a66b59d37d629001" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.405166 4909 scope.go:117] "RemoveContainer" containerID="0dc92530d7c3a493fba4c36b3d79070c26b600b170baa4f05c83ac54ca1f0cd4" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.436204 4909 scope.go:117] "RemoveContainer" containerID="06ec10e870b78a9508fdc0f9af0d0769bace54567bdcb85ffc77bf9a218d7d6e" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.505879 4909 scope.go:117] "RemoveContainer" containerID="0805f4bc86dd00471ce300a5735ca911ab4a9a41d60ea124ead1fd0e3fd4ccbe" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.546949 4909 scope.go:117] "RemoveContainer" containerID="29762398aa81300aa7e6fa97b5acccc7e5d16e4234ca8d5ea87d42654450084b" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.571554 4909 scope.go:117] "RemoveContainer" containerID="d938862fe9fc3e6327eed52ecb437574cdd14b5fddf79ca390b9bf6e50d98375" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.580784 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" probeResult="failure" output="command timed out" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.639954 4909 generic.go:334] "Generic (PLEG): container finished" podID="5ad0a326-11b9-40c8-b251-5994a436110a" containerID="15d949a1b99dcbb9b7b8db33a347fc722bb599a90632809376044acb153f117d" exitCode=0 Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.639978 4909 generic.go:334] "Generic (PLEG): container finished" podID="5ad0a326-11b9-40c8-b251-5994a436110a" containerID="630e4eeb18086e03661fc2005462721172f42178abd3a1806a71653ed20f565d" exitCode=143 Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.640019 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerDied","Data":"15d949a1b99dcbb9b7b8db33a347fc722bb599a90632809376044acb153f117d"} Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.640045 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerDied","Data":"630e4eeb18086e03661fc2005462721172f42178abd3a1806a71653ed20f565d"} Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.641169 4909 generic.go:334] "Generic (PLEG): container finished" podID="19094b17-f379-494e-b377-8191ddab4924" containerID="c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca" exitCode=0 Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.641204 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8676ff5994-wjk95" event={"ID":"19094b17-f379-494e-b377-8191ddab4924","Type":"ContainerDied","Data":"c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca"} Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.643643 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" probeResult="failure" output=< Nov 28 16:36:13 crc kubenswrapper[4909]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Nov 28 16:36:13 crc kubenswrapper[4909]: > Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650137 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f6ddfdd4b-szlst" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650147 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance2631-account-delete-2c2fs" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650165 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron8770-account-delete-9m56t" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650177 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650263 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650137 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-585576c97d-fvkcs" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650288 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650310 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.650310 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 16:36:13 crc kubenswrapper[4909]: I1128 16:36:13.680851 4909 scope.go:117] "RemoveContainer" containerID="34cdf5b11d6117bafb37ddcc3824f5ce702d1b9711769b9e114b66075bba4f47"
Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.815540 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 16:36:13 crc kubenswrapper[4909]: E1128 16:36:13.815599 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:17.815585431 +0000 UTC m=+1560.212269955 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.021040 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00913f80-f496-44ec-a619-99129724cb89" path="/var/lib/kubelet/pods/00913f80-f496-44ec-a619-99129724cb89/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.022019 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" path="/var/lib/kubelet/pods/175903ef-59e0-4c1f-820f-bd3d2692462d/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.022574 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" path="/var/lib/kubelet/pods/287e7e9a-0240-478e-a15b-b01122e79c32/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.023617 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.023706 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:18.023690364 +0000 UTC m=+1560.420374888 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.023924 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" path="/var/lib/kubelet/pods/2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.024748 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" path="/var/lib/kubelet/pods/64b9a5c2-09a8-48fb-9e1b-b66c1003cf61/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.027445 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" path="/var/lib/kubelet/pods/7b7568e8-e3d4-4e06-a25f-33656bdf089f/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.028124 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" path="/var/lib/kubelet/pods/b2ea37c8-3213-4043-9da2-a9e76f9284e4/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.028692 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" path="/var/lib/kubelet/pods/b97782ba-8bf0-4da9-bd81-97e88b4e73e7/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.029989 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7651107-0120-4611-87d0-be009f3749d7" path="/var/lib/kubelet/pods/d7651107-0120-4611-87d0-be009f3749d7/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.030970 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" path="/var/lib/kubelet/pods/e8837df0-c6fe-42a6-bf0f-8ca14f1961a6/volumes"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.100711 4909 scope.go:117] "RemoveContainer" containerID="09cb7681bd82577f5dec1afd70b7dfc60e7c497bc0efb2d3202eab82a5623018"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.156352 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.165057 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.174480 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.182825 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-585576c97d-fvkcs"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.191523 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron8770-account-delete-9m56t"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.201672 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron8770-account-delete-9m56t"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.209338 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.216521 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
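
The nestedpendingoperations entries above show the kubelet refusing to retry a failed MountVolume.SetUp until a "durationBeforeRetry" has elapsed (4s here, with an 8s delay appearing for another volume below), consistent with an exponential backoff that doubles per consecutive failure. A minimal sketch of that schedule; the constants (500ms initial delay, doubling, capped at roughly 2m2s) are my assumption about the upstream defaults, not something stated in this log:

def backoff_schedule(failures, initial=0.5, factor=2.0, cap=122.0):
    """Return the delay (seconds) applied after each consecutive failure."""
    delays = []
    delay = initial
    for _ in range(failures):
        delays.append(delay)
        delay = min(delay * factor, cap)  # grow until the cap is hit
    return delays

print(backoff_schedule(6))  # [0.5, 1.0, 2.0, 4.0, 8.0, 16.0]
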
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.226327 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.232364 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-f6ddfdd4b-szlst"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.237840 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.242518 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.247324 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.252098 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.372721 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65c6d9c7fd-627g9"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.401373 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_dc78dec8-567e-41a1-9fbf-793224410d3b/ovn-northd/0.log"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.401449 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.429866 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg9tm\" (UniqueName: \"kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.429932 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.429975 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430024 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430063 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430095 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430131 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-rundir\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430162 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430238 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8lx5\" (UniqueName: \"kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430265 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430358 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430399 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430440 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs\") pod \"5ad0a326-11b9-40c8-b251-5994a436110a\" (UID: \"5ad0a326-11b9-40c8-b251-5994a436110a\") "
Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.430478 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs\") pod \"dc78dec8-567e-41a1-9fbf-793224410d3b\" (UID: \"dc78dec8-567e-41a1-9fbf-793224410d3b\") "
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.432842 4909 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 16:36:14 crc kubenswrapper[4909]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T16:36:07Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:14 crc kubenswrapper[4909]: /etc/init.d/functions: line 589: 456 Alarm clock "$@" Nov 28 16:36:14 crc kubenswrapper[4909]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-6tr6g" message=< Nov 28 16:36:14 crc kubenswrapper[4909]: Exiting ovn-controller (1) [FAILED] Nov 28 16:36:14 crc kubenswrapper[4909]: Killing ovn-controller (1) [ OK ] Nov 28 16:36:14 crc kubenswrapper[4909]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 28 16:36:14 crc kubenswrapper[4909]: 2025-11-28T16:36:07Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:14 crc kubenswrapper[4909]: /etc/init.d/functions: line 589: 456 Alarm clock "$@" Nov 28 16:36:14 crc kubenswrapper[4909]: > Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.432904 4909 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 16:36:14 crc kubenswrapper[4909]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T16:36:07Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:14 crc kubenswrapper[4909]: /etc/init.d/functions: line 589: 456 Alarm clock "$@" Nov 28 16:36:14 crc kubenswrapper[4909]: > pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" containerID="cri-o://dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.432947 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-6tr6g" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" containerID="cri-o://dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd" gracePeriod=22 Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.433764 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config" (OuterVolumeSpecName: "config") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.436179 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts" (OuterVolumeSpecName: "scripts") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.473105 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-9v966"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.455322 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs" (OuterVolumeSpecName: "logs") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.460144 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.474080 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm" (OuterVolumeSpecName: "kube-api-access-xg9tm") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "kube-api-access-xg9tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.476066 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5" (OuterVolumeSpecName: "kube-api-access-f8lx5") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "kube-api-access-f8lx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.495135 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-2631-account-create-update-kvmx8"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.504337 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance2631-account-delete-2c2fs"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.511150 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-2631-account-create-update-kvmx8"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.512110 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533069 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533096 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg9tm\" (UniqueName: \"kubernetes.io/projected/dc78dec8-567e-41a1-9fbf-793224410d3b-kube-api-access-xg9tm\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533106 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ad0a326-11b9-40c8-b251-5994a436110a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533115 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533124 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533132 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533142 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc78dec8-567e-41a1-9fbf-793224410d3b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.533150 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8lx5\" (UniqueName: \"kubernetes.io/projected/5ad0a326-11b9-40c8-b251-5994a436110a-kube-api-access-f8lx5\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.533160 4909 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.533257 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data podName:02c83d05-a6ce-4c22-9015-91c0a766a518 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:22.533237284 +0000 UTC m=+1564.929921878 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data") pod "rabbitmq-cell1-server-0" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.534889 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.542345 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data" (OuterVolumeSpecName: "config-data") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.543662 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-9v966"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.549786 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.550466 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance2631-account-delete-2c2fs"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.552813 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5ad0a326-11b9-40c8-b251-5994a436110a" (UID: "5ad0a326-11b9-40c8-b251-5994a436110a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.558509 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.571855 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.595821 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "dc78dec8-567e-41a1-9fbf-793224410d3b" (UID: "dc78dec8-567e-41a1-9fbf-793224410d3b"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634030 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634232 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634339 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634407 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42k4s\" (UniqueName: \"kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634437 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634518 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634571 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.634607 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys\") pod \"19094b17-f379-494e-b377-8191ddab4924\" (UID: \"19094b17-f379-494e-b377-8191ddab4924\") " Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635086 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635109 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635127 4909 reconciler_common.go:293] "Volume detached for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635143 4909 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635155 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc78dec8-567e-41a1-9fbf-793224410d3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.635166 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ad0a326-11b9-40c8-b251-5994a436110a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.637472 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.639002 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.640145 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts" (OuterVolumeSpecName: "scripts") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.641915 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s" (OuterVolumeSpecName: "kube-api-access-42k4s") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "kube-api-access-42k4s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687254 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_dc78dec8-567e-41a1-9fbf-793224410d3b/ovn-northd/0.log" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687306 4909 generic.go:334] "Generic (PLEG): container finished" podID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" exitCode=139 Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687367 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerDied","Data":"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc"} Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687429 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dc78dec8-567e-41a1-9fbf-793224410d3b","Type":"ContainerDied","Data":"8af8397c30efca8e6a308b7609f31c2e06d62be6c080bb677b2b1e1b67d6dc77"} Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687453 4909 scope.go:117] "RemoveContainer" containerID="1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.687538 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.690431 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.692479 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data" (OuterVolumeSpecName: "config-data") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.696987 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65c6d9c7fd-627g9" event={"ID":"5ad0a326-11b9-40c8-b251-5994a436110a","Type":"ContainerDied","Data":"9af8cd104e3187ddf8c610b6776a27019a3e5c7984dbfbd2adf24572ee3f1171"} Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.697065 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65c6d9c7fd-627g9" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.702165 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8676ff5994-wjk95" event={"ID":"19094b17-f379-494e-b377-8191ddab4924","Type":"ContainerDied","Data":"c3b929b88bd53587b3ecd169d8d2c3d40d9b7098845c0ae7b4378ffb2f091f9d"} Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.702224 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8676ff5994-wjk95" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.710615 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6tr6g_14b66e32-a660-4643-9f57-f66bf12a56ef/ovn-controller/0.log" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.710655 4909 generic.go:334] "Generic (PLEG): container finished" podID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerID="dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd" exitCode=137 Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.710749 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g" event={"ID":"14b66e32-a660-4643-9f57-f66bf12a56ef","Type":"ContainerDied","Data":"dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd"} Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.725150 4909 scope.go:117] "RemoveContainer" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.729071 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736560 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736594 4909 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736606 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736618 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42k4s\" (UniqueName: \"kubernetes.io/projected/19094b17-f379-494e-b377-8191ddab4924-kube-api-access-42k4s\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736629 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.736641 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.737175 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.741534 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-65c6d9c7fd-627g9"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.744142 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "19094b17-f379-494e-b377-8191ddab4924" (UID: "19094b17-f379-494e-b377-8191ddab4924"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.748489 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.749284 4909 scope.go:117] "RemoveContainer" containerID="1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d" Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.749592 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d\": container with ID starting with 1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d not found: ID does not exist" containerID="1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.749621 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d"} err="failed to get container status \"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d\": rpc error: code = NotFound desc = could not find container \"1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d\": container with ID starting with 1c3a9af8648dff180e4db3b4e37877beb5c3240d62fe2ea612826fb90703150d not found: ID does not exist" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.749641 4909 scope.go:117] "RemoveContainer" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" Nov 28 16:36:14 crc kubenswrapper[4909]: E1128 16:36:14.749886 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc\": container with ID starting with 604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc not found: ID does not exist" containerID="604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.749910 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc"} err="failed to get container status \"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc\": rpc error: code = NotFound desc = could not find container \"604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc\": container with ID starting with 604d9593fae6f90c84804afb01b99c2a6be4dbebec46ec2fa908b5b83bb8c9dc not found: ID does not exist" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.749924 4909 scope.go:117] "RemoveContainer" containerID="15d949a1b99dcbb9b7b8db33a347fc722bb599a90632809376044acb153f117d" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.774500 4909 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.774637 4909 scope.go:117] "RemoveContainer" containerID="630e4eeb18086e03661fc2005462721172f42178abd3a1806a71653ed20f565d" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.799318 4909 scope.go:117] "RemoveContainer" containerID="c758b61754fb6095949cc04fca00fab2b1a68ab0a205fa48bd04afb7cdc48dca" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.838536 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:14 crc kubenswrapper[4909]: I1128 16:36:14.838578 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19094b17-f379-494e-b377-8191ddab4924-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.073948 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8676ff5994-wjk95"] Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.098086 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-8676ff5994-wjk95"] Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.431004 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6tr6g_14b66e32-a660-4643-9f57-f66bf12a56ef/ovn-controller/0.log" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.431338 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.553917 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.553959 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-478mm\" (UniqueName: \"kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.553983 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554046 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554084 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554086 4909 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554159 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554181 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle\") pod \"14b66e32-a660-4643-9f57-f66bf12a56ef\" (UID: \"14b66e32-a660-4643-9f57-f66bf12a56ef\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554187 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554356 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run" (OuterVolumeSpecName: "var-run") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.554973 4909 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.555008 4909 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.555024 4909 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/14b66e32-a660-4643-9f57-f66bf12a56ef-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.556125 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts" (OuterVolumeSpecName: "scripts") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.560679 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm" (OuterVolumeSpecName: "kube-api-access-478mm") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "kube-api-access-478mm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.606340 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.635263 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.192:8081/readyz\": dial tcp 10.217.0.192:8081: i/o timeout" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.638878 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "14b66e32-a660-4643-9f57-f66bf12a56ef" (UID: "14b66e32-a660-4643-9f57-f66bf12a56ef"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.657500 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/14b66e32-a660-4643-9f57-f66bf12a56ef-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.657538 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-478mm\" (UniqueName: \"kubernetes.io/projected/14b66e32-a660-4643-9f57-f66bf12a56ef-kube-api-access-478mm\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.657553 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.657563 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b66e32-a660-4643-9f57-f66bf12a56ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.718702 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.718702 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.757895 4909 generic.go:334] "Generic (PLEG): container finished" podID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerID="ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266" exitCode=0
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.757973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerDied","Data":"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266"}
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758006 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444","Type":"ContainerDied","Data":"d758f5bd74a408367d7310ae37d87089ee0f19c00277a74f6e04970d9f2bb8a2"}
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758026 4909 scope.go:117] "RemoveContainer" containerID="ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266"
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758031 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758801 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758846 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758960 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.758994 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759018 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgs9l\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-kube-api-access-mgs9l\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759084 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759107 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759214 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759240 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759275 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759305 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls\") pod \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\" (UID: \"7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444\") "
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.759890 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.761565 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.763648 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.763758 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.765142 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.765459 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-kube-api-access-mgs9l" (OuterVolumeSpecName: "kube-api-access-mgs9l") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "kube-api-access-mgs9l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.766603 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6tr6g_14b66e32-a660-4643-9f57-f66bf12a56ef/ovn-controller/0.log"
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.766699 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6tr6g" event={"ID":"14b66e32-a660-4643-9f57-f66bf12a56ef","Type":"ContainerDied","Data":"4043cd901717c0c55a563dd5c052609a3e5039e2af5eb6426d0ad896e6c31fc9"}
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.766842 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6tr6g"
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.781128 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.781754 4909 generic.go:334] "Generic (PLEG): container finished" podID="0b1d1797-999d-4453-b674-c40f53d4231e" containerID="219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" exitCode=0
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.781816 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerDied","Data":"219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b"}
Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.784019 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info" (OuterVolumeSpecName: "pod-info") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "pod-info".
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.819144 4909 generic.go:334] "Generic (PLEG): container finished" podID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerID="e404e875f3c6c8a15d87ad24861803ab1e659ac087607f8971106d0d6890fc63" exitCode=0 Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.819272 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerDied","Data":"e404e875f3c6c8a15d87ad24861803ab1e659ac087607f8971106d0d6890fc63"} Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.823516 4909 scope.go:117] "RemoveContainer" containerID="d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.828491 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.834787 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.840445 4909 generic.go:334] "Generic (PLEG): container finished" podID="44195e2b-7f1d-4542-8948-93a818071fd2" containerID="243efd87260faad6273959bee1e446c951a950b25ee3ad8a4d986ebc6dff73bb" exitCode=0 Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.840489 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerDied","Data":"243efd87260faad6273959bee1e446c951a950b25ee3ad8a4d986ebc6dff73bb"} Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.850976 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data" (OuterVolumeSpecName: "config-data") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.852969 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.856816 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf" (OuterVolumeSpecName: "server-conf") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.860785 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.860851 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.860893 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.860914 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861035 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861085 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861103 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861132 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjm7s\" (UniqueName: \"kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s\") pod \"44195e2b-7f1d-4542-8948-93a818071fd2\" (UID: \"44195e2b-7f1d-4542-8948-93a818071fd2\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861495 4909 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861509 4909 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861519 4909 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-mgs9l\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-kube-api-access-mgs9l\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861530 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861556 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861565 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861574 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861583 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861592 4909 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.861600 4909 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.873423 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.868762 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts" (OuterVolumeSpecName: "scripts") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.874582 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.884543 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s" (OuterVolumeSpecName: "kube-api-access-fjm7s") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "kube-api-access-fjm7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.884732 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.884782 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.886518 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-6tr6g"] Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.922636 4909 scope.go:117] "RemoveContainer" containerID="ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266" Nov 28 16:36:15 crc kubenswrapper[4909]: E1128 16:36:15.923264 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266\": container with ID starting with ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266 not found: ID does not exist" containerID="ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.923313 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266"} err="failed to get container status \"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266\": rpc error: code = NotFound desc = could not find container \"ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266\": container with ID starting with ea9f036508aa973d9d7d95b6b3c4ac6136769fc0843a49037989a78d48329266 not found: ID does not exist" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.923350 4909 scope.go:117] "RemoveContainer" containerID="d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71" Nov 28 16:36:15 crc kubenswrapper[4909]: E1128 16:36:15.923724 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71\": container with ID starting with d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71 not found: ID does not exist" containerID="d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.923745 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71"} err="failed to get container status \"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71\": rpc error: code = NotFound desc 
= could not find container \"d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71\": container with ID starting with d88fa152549e49d038ad50024ffe594844eac1a2e9fbab72853e4ad449f04b71 not found: ID does not exist" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.923757 4909 scope.go:117] "RemoveContainer" containerID="dcbacc4980de48368b434765e94f18d28973918c9194edcb4918b9d52bf6bafd" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.929643 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.939013 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" path="/var/lib/kubelet/pods/14b66e32-a660-4643-9f57-f66bf12a56ef/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.939525 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19094b17-f379-494e-b377-8191ddab4924" path="/var/lib/kubelet/pods/19094b17-f379-494e-b377-8191ddab4924/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.940048 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" path="/var/lib/kubelet/pods/1c81b153-5498-4d63-9c98-fa8b79d5acdd/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.941130 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" path="/var/lib/kubelet/pods/5ad0a326-11b9-40c8-b251-5994a436110a/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.942030 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b472e4d-2724-4ea4-93c9-5552d92af793" path="/var/lib/kubelet/pods/5b472e4d-2724-4ea4-93c9-5552d92af793/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.942657 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" path="/var/lib/kubelet/pods/72f0e500-fe06-4373-9bc3-6cdaa2520043/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.943720 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9369b072-ffb6-4fe8-bd5b-ed55bd086ca0" path="/var/lib/kubelet/pods/9369b072-ffb6-4fe8-bd5b-ed55bd086ca0/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.944253 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a91c37b7-2346-47e6-b462-a48c06d30017" path="/var/lib/kubelet/pods/a91c37b7-2346-47e6-b462-a48c06d30017/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.944722 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" path="/var/lib/kubelet/pods/b0f3cfb2-6884-4ef1-9844-cf494a2e21bb/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.945778 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" path="/var/lib/kubelet/pods/c64b6821-6f46-4764-be55-97ed8c71fefa/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.946323 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" path="/var/lib/kubelet/pods/dc78dec8-567e-41a1-9fbf-793224410d3b/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.946947 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" path="/var/lib/kubelet/pods/e086b29e-c7fb-45a4-a6f2-c30508f1b25a/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.947990 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8b95945-6169-4e44-861a-f4abd48a7161" path="/var/lib/kubelet/pods/e8b95945-6169-4e44-861a-f4abd48a7161/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.948716 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffd60458-19af-464b-9649-57d25893f22a" path="/var/lib/kubelet/pods/ffd60458-19af-464b-9649-57d25893f22a/volumes" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.949740 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.950492 4909 scope.go:117] "RemoveContainer" containerID="2eb951aa3c283ce1fd52ad843a7234b8357e48ad0ed3f9fa1b30578a1af9fa7a" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.956056 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" (UID: "7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962016 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962146 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sv7bn\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962186 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962209 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962245 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962280 4909 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962710 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962770 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962790 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962816 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962840 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962858 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962893 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962913 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962955 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.962980 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m782j\" (UniqueName: \"kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963009 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated\") pod \"0b1d1797-999d-4453-b674-c40f53d4231e\" (UID: \"0b1d1797-999d-4453-b674-c40f53d4231e\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963166 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963195 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963233 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963257 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf\") pod \"02c83d05-a6ce-4c22-9015-91c0a766a518\" (UID: \"02c83d05-a6ce-4c22-9015-91c0a766a518\") " Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963563 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.963966 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964011 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjm7s\" (UniqueName: \"kubernetes.io/projected/44195e2b-7f1d-4542-8948-93a818071fd2-kube-api-access-fjm7s\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964023 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964033 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964044 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964054 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964065 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964075 4909 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964085 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964094 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964103 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44195e2b-7f1d-4542-8948-93a818071fd2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.964651 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.965483 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.966286 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.968380 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j" (OuterVolumeSpecName: "kube-api-access-m782j") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "kube-api-access-m782j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.968511 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.968616 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.969030 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info" (OuterVolumeSpecName: "pod-info") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.969184 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.970468 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn" (OuterVolumeSpecName: "kube-api-access-sv7bn") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). 
InnerVolumeSpecName "kube-api-access-sv7bn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.973100 4909 scope.go:117] "RemoveContainer" containerID="66d5e1af763ac9fbd2d79ed29f67762e31a80531d747a095842ae46df8e3741e" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.977817 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.989274 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.989305 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data" (OuterVolumeSpecName: "config-data") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.992251 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.993952 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data" (OuterVolumeSpecName: "config-data") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:15 crc kubenswrapper[4909]: I1128 16:36:15.999132 4909 scope.go:117] "RemoveContainer" containerID="243efd87260faad6273959bee1e446c951a950b25ee3ad8a4d986ebc6dff73bb" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.008155 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "0b1d1797-999d-4453-b674-c40f53d4231e" (UID: "0b1d1797-999d-4453-b674-c40f53d4231e"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.010121 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf" (OuterVolumeSpecName: "server-conf") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.010996 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44195e2b-7f1d-4542-8948-93a818071fd2" (UID: "44195e2b-7f1d-4542-8948-93a818071fd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.022161 4909 scope.go:117] "RemoveContainer" containerID="100c610b01cd8cbae563a7661d097874303002f0d958013b0241d5cf74e9cfd2" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.046693 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "02c83d05-a6ce-4c22-9015-91c0a766a518" (UID: "02c83d05-a6ce-4c22-9015-91c0a766a518"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.065943 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066004 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sv7bn\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-kube-api-access-sv7bn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066023 4909 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066042 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066069 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066083 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066094 4909 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0b1d1797-999d-4453-b674-c40f53d4231e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066106 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b1d1797-999d-4453-b674-c40f53d4231e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066117 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc 
kubenswrapper[4909]: I1128 16:36:16.066128 4909 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066140 4909 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/02c83d05-a6ce-4c22-9015-91c0a766a518-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066153 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m782j\" (UniqueName: \"kubernetes.io/projected/0b1d1797-999d-4453-b674-c40f53d4231e-kube-api-access-m782j\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066165 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0b1d1797-999d-4453-b674-c40f53d4231e-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066183 4909 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/02c83d05-a6ce-4c22-9015-91c0a766a518-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066200 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066216 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/02c83d05-a6ce-4c22-9015-91c0a766a518-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066232 4909 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/02c83d05-a6ce-4c22-9015-91c0a766a518-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.066244 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44195e2b-7f1d-4542-8948-93a818071fd2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.091658 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.100438 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.104759 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.111133 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.167840 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.167875 4909 reconciler_common.go:293] "Volume detached for volume 
\"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.444182 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.444554 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.855737 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0b1d1797-999d-4453-b674-c40f53d4231e","Type":"ContainerDied","Data":"f7099d33e98225332f99451ed479e52ecd6fb45773030325a3b3d5ecb6f2af70"} Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.855797 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.855841 4909 scope.go:117] "RemoveContainer" containerID="219f6794715541737d340cec186fc3847a65402dd4d251a98a0ddbe7c6c7178b" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.871083 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"02c83d05-a6ce-4c22-9015-91c0a766a518","Type":"ContainerDied","Data":"da6cf49208ef158abb711f1af001d84826e34b08ee2102eedfbf8a66683bb8b6"} Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.871104 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.876598 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44195e2b-7f1d-4542-8948-93a818071fd2","Type":"ContainerDied","Data":"65a39c69d94ebd3682bf32718aecafa565b14fc87de765b90fc2a780dd455ff1"} Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.876700 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.890160 4909 scope.go:117] "RemoveContainer" containerID="67c0edd657bacfc89509d50f3315a4b0e80b205b4d0aa611b83d2d7a21e317a6" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.929333 4909 scope.go:117] "RemoveContainer" containerID="e404e875f3c6c8a15d87ad24861803ab1e659ac087607f8971106d0d6890fc63" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.979396 4909 scope.go:117] "RemoveContainer" containerID="49cf443f1a213e1c0c384ac59bd266107cf92076be4dceb74f6ecba7e3ee0c82" Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.980600 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.989148 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:16 crc kubenswrapper[4909]: I1128 16:36:16.995733 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.001692 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.006737 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.012374 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.029466 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/memcached-0" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.103:11211: i/o timeout" Nov 28 16:36:17 crc kubenswrapper[4909]: E1128 16:36:17.907096 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:17 crc kubenswrapper[4909]: E1128 16:36:17.908578 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:25.908549371 +0000 UTC m=+1568.305233935 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.931473 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" path="/var/lib/kubelet/pods/02c83d05-a6ce-4c22-9015-91c0a766a518/volumes" Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.933100 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" path="/var/lib/kubelet/pods/0b1d1797-999d-4453-b674-c40f53d4231e/volumes" Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.935523 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" path="/var/lib/kubelet/pods/44195e2b-7f1d-4542-8948-93a818071fd2/volumes" Nov 28 16:36:17 crc kubenswrapper[4909]: I1128 16:36:17.937432 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" path="/var/lib/kubelet/pods/7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444/volumes" Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.110471 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.110528 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:26.110514459 +0000 UTC m=+1568.507198973 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.221341 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.222146 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.222101 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.222722 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.222760 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.223319 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.224394 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:18 crc kubenswrapper[4909]: E1128 16:36:18.224430 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.222144 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.223126 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.223396 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.223424 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.224532 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.228831 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.230139 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:23 crc kubenswrapper[4909]: E1128 16:36:23.230178 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:36:24 crc kubenswrapper[4909]: I1128 16:36:24.985314 4909 generic.go:334] "Generic (PLEG): container finished" podID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerID="84ef1b544276823c5c91a1406dc17348087fabba68d10b1561cec7a3a87c25bd" exitCode=0
Nov 28 16:36:24 crc kubenswrapper[4909]: I1128 16:36:24.985417 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerDied","Data":"84ef1b544276823c5c91a1406dc17348087fabba68d10b1561cec7a3a87c25bd"}
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.385482 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-55f6d745d5-tgbm7"
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435569 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435621 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435715 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435749 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435778 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5t7lp\" (UniqueName: \"kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435857 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.435934 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config\") pod \"febda67e-3daf-4cb4-9fd1-530d6c398404\" (UID: \"febda67e-3daf-4cb4-9fd1-530d6c398404\") "
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.443612 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.458967 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp" (OuterVolumeSpecName: "kube-api-access-5t7lp") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "kube-api-access-5t7lp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.486308 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.487771 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config" (OuterVolumeSpecName: "config") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.500441 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.512952 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.517443 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "febda67e-3daf-4cb4-9fd1-530d6c398404" (UID: "febda67e-3daf-4cb4-9fd1-530d6c398404"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537761 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537803 4909 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537816 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5t7lp\" (UniqueName: \"kubernetes.io/projected/febda67e-3daf-4cb4-9fd1-530d6c398404-kube-api-access-5t7lp\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537826 4909 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537835 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537844 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.537851 4909 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/febda67e-3daf-4cb4-9fd1-530d6c398404-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:25 crc kubenswrapper[4909]: E1128 16:36:25.945339 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 16:36:25 crc kubenswrapper[4909]: E1128 16:36:25.945712 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:36:41.945695723 +0000 UTC m=+1584.342380247 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.995558 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-55f6d745d5-tgbm7" event={"ID":"febda67e-3daf-4cb4-9fd1-530d6c398404","Type":"ContainerDied","Data":"a557cc9d180d0683f4fde9bb969d28af2616171cadbdd8f48218d6d2c12dd589"}
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.995604 4909 scope.go:117] "RemoveContainer" containerID="a3fd76fee056f26d16128b3c7dd903dc417db926bafd7b4cc42bf63262cd356c"
Nov 28 16:36:25 crc kubenswrapper[4909]: I1128 16:36:25.995764 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-55f6d745d5-tgbm7"
Nov 28 16:36:26 crc kubenswrapper[4909]: I1128 16:36:26.015064 4909 scope.go:117] "RemoveContainer" containerID="44d78f1f1841456ec290a5fd131e7e46dad4741dfefbdbd69b4571e60a9b775f"
Nov 28 16:36:26 crc kubenswrapper[4909]: I1128 16:36:26.025193 4909 scope.go:117] "RemoveContainer" containerID="84ef1b544276823c5c91a1406dc17348087fabba68d10b1561cec7a3a87c25bd"
Nov 28 16:36:26 crc kubenswrapper[4909]: I1128 16:36:26.028828 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"]
Nov 28 16:36:26 crc kubenswrapper[4909]: I1128 16:36:26.038267 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-55f6d745d5-tgbm7"]
Nov 28 16:36:26 crc kubenswrapper[4909]: E1128 16:36:26.150708 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 16:36:26 crc kubenswrapper[4909]: E1128 16:36:26.150791 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts podName:c2cc9842-6f8f-4afc-9895-2b7a75a9696c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:42.150772955 +0000 UTC m=+1584.547457479 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts") pod "cinder937d-account-delete-4rdjd" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c") : configmap "openstack-scripts" not found
Nov 28 16:36:27 crc kubenswrapper[4909]: I1128 16:36:27.925739 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" path="/var/lib/kubelet/pods/febda67e-3daf-4cb4-9fd1-530d6c398404/volumes"
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.221157 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.221507 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.222020 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.222071 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.222359 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.224182 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.225745 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:28 crc kubenswrapper[4909]: E1128 16:36:28.225807 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.221249 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.222351 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.223116 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.223142 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.223198 4909 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.225582 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.227892 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 16:36:33 crc kubenswrapper[4909]: E1128 16:36:33.227961 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-q2kt7" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.698379 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.721475 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache\") pod \"af476a0f-b390-443d-b7a5-14181e7c7bc7\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.721529 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock\") pod \"af476a0f-b390-443d-b7a5-14181e7c7bc7\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.721569 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nnbx\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx\") pod \"af476a0f-b390-443d-b7a5-14181e7c7bc7\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.721614 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"af476a0f-b390-443d-b7a5-14181e7c7bc7\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.721646 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") pod \"af476a0f-b390-443d-b7a5-14181e7c7bc7\" (UID: \"af476a0f-b390-443d-b7a5-14181e7c7bc7\") "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.722368 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock" (OuterVolumeSpecName: "lock") pod "af476a0f-b390-443d-b7a5-14181e7c7bc7" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.722508 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache" (OuterVolumeSpecName: "cache") pod "af476a0f-b390-443d-b7a5-14181e7c7bc7" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.728408 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx" (OuterVolumeSpecName: "kube-api-access-7nnbx") pod "af476a0f-b390-443d-b7a5-14181e7c7bc7" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7"). InnerVolumeSpecName "kube-api-access-7nnbx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.728475 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "swift") pod "af476a0f-b390-443d-b7a5-14181e7c7bc7" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.729505 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "af476a0f-b390-443d-b7a5-14181e7c7bc7" (UID: "af476a0f-b390-443d-b7a5-14181e7c7bc7"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.824074 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nnbx\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-kube-api-access-7nnbx\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.824144 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.824164 4909 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af476a0f-b390-443d-b7a5-14181e7c7bc7-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.824180 4909 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-cache\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.824197 4909 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af476a0f-b390-443d-b7a5-14181e7c7bc7-lock\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.845968 4909 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Nov 28 16:36:36 crc kubenswrapper[4909]: I1128 16:36:36.926526 4909 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.116175 4909 generic.go:334] "Generic (PLEG): container finished" podID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerID="d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1" exitCode=137
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.116260 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"}
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.116311 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.116345 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af476a0f-b390-443d-b7a5-14181e7c7bc7","Type":"ContainerDied","Data":"0f7cc4d922dbf0a99e60ac3a4ea3ccc6245ef275c02bfb082139cd45e859a584"}
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.116373 4909 scope.go:117] "RemoveContainer" containerID="d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.125067 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q2kt7_ff22194e-63a9-410d-80b6-9b1a1e68b164/ovs-vswitchd/0.log"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.126007 4909 generic.go:334] "Generic (PLEG): container finished" podID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4" exitCode=137
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.126052 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerDied","Data":"afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4"}
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.161351 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.162409 4909 scope.go:117] "RemoveContainer" containerID="98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.171045 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.187230 4909 scope.go:117] "RemoveContainer" containerID="a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.209580 4909 scope.go:117] "RemoveContainer" containerID="dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.227954 4909 scope.go:117] "RemoveContainer" containerID="f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.249346 4909 scope.go:117] "RemoveContainer" containerID="7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.266882 4909 scope.go:117] "RemoveContainer" containerID="ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.286485 4909 scope.go:117] "RemoveContainer" containerID="5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.303133 4909 scope.go:117] "RemoveContainer" containerID="e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.322827 4909 scope.go:117] "RemoveContainer" containerID="4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.340861 4909 scope.go:117] "RemoveContainer" containerID="0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.369543 4909 scope.go:117] "RemoveContainer" containerID="f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.404029 4909 scope.go:117] "RemoveContainer" containerID="ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.431924 4909 scope.go:117] "RemoveContainer" containerID="2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.451288 4909 scope.go:117] "RemoveContainer" containerID="0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.466014 4909 scope.go:117] "RemoveContainer" containerID="d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.466366 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1\": container with ID starting with d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1 not found: ID does not exist" containerID="d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.466395 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1"} err="failed to get container status \"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1\": rpc error: code = NotFound desc = could not find container \"d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1\": container with ID starting with d413cfaf4ea4f22a5ed6f16b2e1f0edf2c4c5fa640499e9a5165eac333f204d1 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.466422 4909 scope.go:117] "RemoveContainer" containerID="98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.466752 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948\": container with ID starting with 98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948 not found: ID does not exist" containerID="98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.466806 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948"} err="failed to get container status \"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948\": rpc error: code = NotFound desc = could not find container \"98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948\": container with ID starting with 98ad30563ab1b4b11f32f1a8f225fc528006c2c8fcdf166079a8a955004b7948 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.466839 4909 scope.go:117] "RemoveContainer" containerID="a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.467206 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4\": container with ID starting with a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4 not found: ID does not exist" containerID="a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467234 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4"} err="failed to get container status \"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4\": rpc error: code = NotFound desc = could not find container \"a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4\": container with ID starting with a9da75fb1065909a22a80afa31dcf4a18f089d1a8658fc535e3bcc82fe8ac3a4 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467253 4909 scope.go:117] "RemoveContainer" containerID="dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.467464 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b\": container with ID starting with dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b not found: ID does not exist" containerID="dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467488 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b"} err="failed to get container status \"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b\": rpc error: code = NotFound desc = could not find container \"dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b\": container with ID starting with dc11615ceb380e3360e5cbe4640e562e4e49cb1fd342fce0adce73bd5cb5460b not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467504 4909 scope.go:117] "RemoveContainer" containerID="f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.467769 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94\": container with ID starting with f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94 not found: ID does not exist" containerID="f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467795 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94"} err="failed to get container status \"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94\": rpc error: code = NotFound desc = could not find container \"f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94\": container with ID starting with f27ae8741dd55f00cb98a8fb2353da4cf744518b48ce853d95d2679d8887ef94 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.467814 4909 scope.go:117] "RemoveContainer" containerID="7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.468056 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a\": container with ID starting with 7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a not found: ID does not exist" containerID="7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468089 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a"} err="failed to get container status \"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a\": rpc error: code = NotFound desc = could not find container \"7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a\": container with ID starting with 7cc9830ede6c460701043fa486da1fb48a9626227dd752474528ad1c78113d8a not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468110 4909 scope.go:117] "RemoveContainer" containerID="ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.468345 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66\": container with ID starting with ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66 not found: ID does not exist" containerID="ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468371 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66"} err="failed to get container status \"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66\": rpc error: code = NotFound desc = could not find container \"ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66\": container with ID starting with ea215891e9e527c761e9c1fee97f230f011da309562acb7bce70287bd0410c66 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468391 4909 scope.go:117] "RemoveContainer" containerID="5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.468581 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018\": container with ID starting with 5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018 not found: ID does not exist" containerID="5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468613 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018"} err="failed to get container status \"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018\": rpc error: code = NotFound desc = could not find container \"5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018\": container with ID starting with 5fb91605f618e075deb2aac1d02ba547d7690726cbd7cbd378c8171a086d9018 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468632 4909 scope.go:117] "RemoveContainer" containerID="e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.468879 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e\": container with ID starting with e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e not found: ID does not exist" containerID="e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468909 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e"} err="failed to get container status \"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e\": rpc error: code = NotFound desc = could not find container \"e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e\": container with ID starting with e537fff9ca9b1d1abd731931ae9c78538cb3c3b7ac87bc65d5b181dd8dc9988e not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.468930 4909 scope.go:117] "RemoveContainer" containerID="4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.469197 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515\": container with ID starting with 4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515 not found: ID does not exist" containerID="4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469223 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515"} err="failed to get container status \"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515\": rpc error: code = NotFound desc = could not find container \"4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515\": container with ID starting with 4d6f16f0949b97c95bc1814668d3795dce74816c7c22e56931a221f9c9af6515 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469241 4909 scope.go:117] "RemoveContainer" containerID="0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.469560 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9\": container with ID starting with 0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9 not found: ID does not exist" containerID="0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469588 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9"} err="failed to get container status \"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9\": rpc error: code = NotFound desc = could not find container \"0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9\": container with ID starting with 0555790df47cc73f744971b0906de125a73508760c08642f116f54367b3effa9 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469604 4909 scope.go:117] "RemoveContainer" containerID="f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.469875 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51\": container with ID starting with f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51 not found: ID does not exist" containerID="f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469894 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51"} err="failed to get container status \"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51\": rpc error: code = NotFound desc = could not find container \"f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51\": container with ID starting with f7767062c82c125be67fc8d87066f9088acb549941128e8dfa6db30304d06a51 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.469908 4909 scope.go:117] "RemoveContainer" containerID="ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.470137 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377\": container with ID starting with ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377 not found: ID does not exist" containerID="ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.470178 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377"} err="failed to get container status \"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377\": rpc error: code = NotFound desc = could not find container \"ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377\": container with ID starting with ce4539306b72b839722ed46646da187a61406695c153b17107ee77a2ce3e2377 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.470190 4909 scope.go:117] "RemoveContainer" containerID="2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.470399 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859\": container with ID starting with 2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859 not found: ID does not exist" containerID="2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.470416 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859"} err="failed to get container status \"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859\": rpc error: code = NotFound desc = could not find container \"2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859\": container with ID starting with 2431575eb32cdbcb1846b977ebb5a16dee5e1fd73658a4da6fe6d41dd6ea5859 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.470428 4909 scope.go:117] "RemoveContainer" containerID="0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"
Nov 28 16:36:37 crc kubenswrapper[4909]: E1128 16:36:37.470629 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11\": container with ID starting with 0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11 not found: ID does not exist" containerID="0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.470648 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11"} err="failed to get container status \"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11\": rpc error: code = NotFound desc = could not find container \"0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11\": container with ID starting with 0f9831bb56002e61b2af5b4efe43f7352b26f5bcba0fac4ff2b7c7594d30ca11 not found: ID does not exist"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.564460 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q2kt7_ff22194e-63a9-410d-80b6-9b1a1e68b164/ovs-vswitchd/0.log"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.565793 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637187 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637234 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637270 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637310 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637310 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log" (OuterVolumeSpecName: "var-log") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637289 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637346 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x669m\" (UniqueName: \"kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637367 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run" (OuterVolumeSpecName: "var-run") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637409 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib\") pod \"ff22194e-63a9-410d-80b6-9b1a1e68b164\" (UID: \"ff22194e-63a9-410d-80b6-9b1a1e68b164\") "
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637534 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib" (OuterVolumeSpecName: "var-lib") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637786 4909 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-lib\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637813 4909 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-etc-ovs\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637824 4909 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-log\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.637836 4909 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ff22194e-63a9-410d-80b6-9b1a1e68b164-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.638358 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts" (OuterVolumeSpecName: "scripts") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.640549 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m" (OuterVolumeSpecName: "kube-api-access-x669m") pod "ff22194e-63a9-410d-80b6-9b1a1e68b164" (UID: "ff22194e-63a9-410d-80b6-9b1a1e68b164"). InnerVolumeSpecName "kube-api-access-x669m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.738840 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ff22194e-63a9-410d-80b6-9b1a1e68b164-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.738878 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x669m\" (UniqueName: \"kubernetes.io/projected/ff22194e-63a9-410d-80b6-9b1a1e68b164-kube-api-access-x669m\") on node \"crc\" DevicePath \"\""
Nov 28 16:36:37 crc kubenswrapper[4909]: I1128 16:36:37.918447 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" path="/var/lib/kubelet/pods/af476a0f-b390-443d-b7a5-14181e7c7bc7/volumes"
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.141033 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q2kt7_ff22194e-63a9-410d-80b6-9b1a1e68b164/ovs-vswitchd/0.log"
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.141993 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q2kt7" event={"ID":"ff22194e-63a9-410d-80b6-9b1a1e68b164","Type":"ContainerDied","Data":"175842d147817b5559190c7827d9da2f04b34520f781cd5e9ce0bfce76228679"}
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.142034 4909 scope.go:117] "RemoveContainer" containerID="afca6794cc8913f3a96d1e4b580a859e2e2d5089f2b862e784689db6cf4ab6c4"
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.142094 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-q2kt7"
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.171852 4909 scope.go:117] "RemoveContainer" containerID="96dc460f1fe51a7876a0d73f5eedaf50b76b8405b0eee13b2afa64eddabda435"
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.172692 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"]
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.179109 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-q2kt7"]
Nov 28 16:36:38 crc kubenswrapper[4909]: I1128 16:36:38.193024 4909 scope.go:117] "RemoveContainer" containerID="4f5d8f239fd0e5782668fdf514cb48a3f0d94294be3f0d631b1c21d4439268b8"
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.917324 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" path="/var/lib/kubelet/pods/ff22194e-63a9-410d-80b6-9b1a1e68b164/volumes"
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.944291 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj"
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.948226 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-84ff6c46f-q849h"
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.967923 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle\") pod \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.967968 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle\") pod \"99398e49-db85-4878-b759-367747402c8b\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968001 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data\") pod \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968045 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data\") pod \"99398e49-db85-4878-b759-367747402c8b\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968072 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom\") pod \"99398e49-db85-4878-b759-367747402c8b\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968096 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs\") pod \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968119 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs\") pod \"99398e49-db85-4878-b759-367747402c8b\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968141 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwckv\" (UniqueName: \"kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv\") pod \"99398e49-db85-4878-b759-367747402c8b\" (UID: \"99398e49-db85-4878-b759-367747402c8b\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968169 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6zdj\" (UniqueName: \"kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj\") pod \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\" (UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") "
Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.968227 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom\") pod \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\"
(UID: \"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f\") " Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.969226 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs" (OuterVolumeSpecName: "logs") pod "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" (UID: "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.969987 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs" (OuterVolumeSpecName: "logs") pod "99398e49-db85-4878-b759-367747402c8b" (UID: "99398e49-db85-4878-b759-367747402c8b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.989740 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "99398e49-db85-4878-b759-367747402c8b" (UID: "99398e49-db85-4878-b759-367747402c8b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.990941 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv" (OuterVolumeSpecName: "kube-api-access-wwckv") pod "99398e49-db85-4878-b759-367747402c8b" (UID: "99398e49-db85-4878-b759-367747402c8b"). InnerVolumeSpecName "kube-api-access-wwckv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.991469 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" (UID: "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:39 crc kubenswrapper[4909]: I1128 16:36:39.991880 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj" (OuterVolumeSpecName: "kube-api-access-z6zdj") pod "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" (UID: "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f"). InnerVolumeSpecName "kube-api-access-z6zdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.011931 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" (UID: "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.020217 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99398e49-db85-4878-b759-367747402c8b" (UID: "99398e49-db85-4878-b759-367747402c8b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.023774 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data" (OuterVolumeSpecName: "config-data") pod "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" (UID: "0e95a7b6-74fd-4db5-bb83-b8e8f80a698f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.046015 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data" (OuterVolumeSpecName: "config-data") pod "99398e49-db85-4878-b759-367747402c8b" (UID: "99398e49-db85-4878-b759-367747402c8b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.069985 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070018 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070032 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070043 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070053 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99398e49-db85-4878-b759-367747402c8b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070066 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99398e49-db85-4878-b759-367747402c8b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070078 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070088 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwckv\" (UniqueName: \"kubernetes.io/projected/99398e49-db85-4878-b759-367747402c8b-kube-api-access-wwckv\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070101 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6zdj\" (UniqueName: \"kubernetes.io/projected/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-kube-api-access-z6zdj\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.070111 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f-config-data-custom\") on node 
\"crc\" DevicePath \"\"" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.160431 4909 generic.go:334] "Generic (PLEG): container finished" podID="99398e49-db85-4878-b759-367747402c8b" containerID="993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f" exitCode=137 Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.160487 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-84ff6c46f-q849h" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.160503 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerDied","Data":"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f"} Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.160902 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84ff6c46f-q849h" event={"ID":"99398e49-db85-4878-b759-367747402c8b","Type":"ContainerDied","Data":"f0d9dd166d658f22876e770c6d80529e6bccc330def0c2d462d37a796ac8585c"} Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.160928 4909 scope.go:117] "RemoveContainer" containerID="993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.163356 4909 generic.go:334] "Generic (PLEG): container finished" podID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerID="e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa" exitCode=137 Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.163387 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerDied","Data":"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa"} Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.163592 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" event={"ID":"0e95a7b6-74fd-4db5-bb83-b8e8f80a698f","Type":"ContainerDied","Data":"e0692acbee18d2024d6649491c0e3feb865f0fd7c443f6d20eee0b1cf0d3ad8d"} Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.163395 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-8588dd4f7d-772fj" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.207896 4909 scope.go:117] "RemoveContainer" containerID="a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.219742 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.229160 4909 scope.go:117] "RemoveContainer" containerID="993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f" Nov 28 16:36:40 crc kubenswrapper[4909]: E1128 16:36:40.229784 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f\": container with ID starting with 993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f not found: ID does not exist" containerID="993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.229836 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f"} err="failed to get container status \"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f\": rpc error: code = NotFound desc = could not find container \"993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f\": container with ID starting with 993b00e1b6bc618ccc84a0eb611ea3966a893434096b04798bdd63d90c26e82f not found: ID does not exist" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.229866 4909 scope.go:117] "RemoveContainer" containerID="a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac" Nov 28 16:36:40 crc kubenswrapper[4909]: E1128 16:36:40.230303 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac\": container with ID starting with a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac not found: ID does not exist" containerID="a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.230419 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac"} err="failed to get container status \"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac\": rpc error: code = NotFound desc = could not find container \"a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac\": container with ID starting with a025c3c4d56c233f80af0daccb5a94bf3f456bd546731ecd2ccb3e2696f523ac not found: ID does not exist" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.230482 4909 scope.go:117] "RemoveContainer" containerID="e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.231393 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-84ff6c46f-q849h"] Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.240328 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.246637 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/barbican-keystone-listener-8588dd4f7d-772fj"] Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.247961 4909 scope.go:117] "RemoveContainer" containerID="c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.269624 4909 scope.go:117] "RemoveContainer" containerID="e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa" Nov 28 16:36:40 crc kubenswrapper[4909]: E1128 16:36:40.270160 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa\": container with ID starting with e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa not found: ID does not exist" containerID="e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.270199 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa"} err="failed to get container status \"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa\": rpc error: code = NotFound desc = could not find container \"e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa\": container with ID starting with e082a22ef08996aa9a428e8fe49aaaf3d2faeeef0059056c795acba881811baa not found: ID does not exist" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.270233 4909 scope.go:117] "RemoveContainer" containerID="c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938" Nov 28 16:36:40 crc kubenswrapper[4909]: E1128 16:36:40.270506 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938\": container with ID starting with c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938 not found: ID does not exist" containerID="c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.270538 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938"} err="failed to get container status \"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938\": rpc error: code = NotFound desc = could not find container \"c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938\": container with ID starting with c9a6ef16e79ec3520b4ab028495d1a8dc6b763fafac55c5afb96c84780e98938 not found: ID does not exist" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.545510 4909 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pode792abf7-967c-4293-b5f4-f073b07c8cf1"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pode792abf7-967c-4293-b5f4-f073b07c8cf1] : Timed out while waiting for systemd to remove kubepods-besteffort-pode792abf7_967c_4293_b5f4_f073b07c8cf1.slice" Nov 28 16:36:40 crc kubenswrapper[4909]: I1128 16:36:40.552797 4909 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podbac06af4-bbe1-482a-8815-14a9cf2a1699"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podbac06af4-bbe1-482a-8815-14a9cf2a1699] : Timed out while waiting for systemd to remove 
kubepods-besteffort-podbac06af4_bbe1_482a_8815_14a9cf2a1699.slice" Nov 28 16:36:41 crc kubenswrapper[4909]: E1128 16:36:41.727022 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2cc9842_6f8f_4afc_9895_2b7a75a9696c.slice/crio-conmon-09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.847537 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.898768 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wttq4\" (UniqueName: \"kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4\") pod \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.898905 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts\") pod \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\" (UID: \"c2cc9842-6f8f-4afc-9895-2b7a75a9696c\") " Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.899808 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c2cc9842-6f8f-4afc-9895-2b7a75a9696c" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.900076 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.906764 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4" (OuterVolumeSpecName: "kube-api-access-wttq4") pod "c2cc9842-6f8f-4afc-9895-2b7a75a9696c" (UID: "c2cc9842-6f8f-4afc-9895-2b7a75a9696c"). InnerVolumeSpecName "kube-api-access-wttq4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.914211 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" path="/var/lib/kubelet/pods/0e95a7b6-74fd-4db5-bb83-b8e8f80a698f/volumes" Nov 28 16:36:41 crc kubenswrapper[4909]: I1128 16:36:41.914989 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99398e49-db85-4878-b759-367747402c8b" path="/var/lib/kubelet/pods/99398e49-db85-4878-b759-367747402c8b/volumes" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.001765 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wttq4\" (UniqueName: \"kubernetes.io/projected/c2cc9842-6f8f-4afc-9895-2b7a75a9696c-kube-api-access-wttq4\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:42 crc kubenswrapper[4909]: E1128 16:36:42.002274 4909 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:42 crc kubenswrapper[4909]: E1128 16:36:42.002381 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts podName:2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f nodeName:}" failed. No retries permitted until 2025-11-28 16:37:14.002354156 +0000 UTC m=+1616.399038690 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts") pod "placement3326-account-delete-6mrc7" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f") : configmap "openstack-scripts" not found Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.218938 4909 generic.go:334] "Generic (PLEG): container finished" podID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" containerID="dddd51e21ee1da11fcdbb2f80b518588c94f66fdf38cd76c69d36bbd73a25213" exitCode=137 Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.219053 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican9799-account-delete-z2p59" event={"ID":"8fa2662b-eed3-4461-ba5d-d4554ca4a22b","Type":"ContainerDied","Data":"dddd51e21ee1da11fcdbb2f80b518588c94f66fdf38cd76c69d36bbd73a25213"} Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.220958 4909 generic.go:334] "Generic (PLEG): container finished" podID="38f993aa-ed40-45f0-821f-e5d7f482ec99" containerID="c85df760aafac3a038f71027a3c1a79c7e72fbde573d1a5e4dfa79b8700bbd6d" exitCode=137 Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.221049 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapiab11-account-delete-2d7kv" event={"ID":"38f993aa-ed40-45f0-821f-e5d7f482ec99","Type":"ContainerDied","Data":"c85df760aafac3a038f71027a3c1a79c7e72fbde573d1a5e4dfa79b8700bbd6d"} Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.222372 4909 generic.go:334] "Generic (PLEG): container finished" podID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" containerID="09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f" exitCode=137 Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.222396 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder937d-account-delete-4rdjd" event={"ID":"c2cc9842-6f8f-4afc-9895-2b7a75a9696c","Type":"ContainerDied","Data":"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f"} Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.222410 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder937d-account-delete-4rdjd" event={"ID":"c2cc9842-6f8f-4afc-9895-2b7a75a9696c","Type":"ContainerDied","Data":"e8bf24bda58e5121229915ceab0bc295fb4e4833801ddc66f26c95fbafcc8094"} Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.222429 4909 scope.go:117] "RemoveContainer" containerID="09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.222512 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder937d-account-delete-4rdjd" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.247993 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"] Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.260721 4909 scope.go:117] "RemoveContainer" containerID="09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f" Nov 28 16:36:42 crc kubenswrapper[4909]: E1128 16:36:42.261254 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f\": container with ID starting with 09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f not found: ID does not exist" containerID="09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.261293 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f"} err="failed to get container status \"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f\": rpc error: code = NotFound desc = could not find container \"09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f\": container with ID starting with 09205ac4d06967aa940b5f5c0504e1750c1b1ffd2519f1715951a28c3d850f7f not found: ID does not exist" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.266192 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder937d-account-delete-4rdjd"] Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.278134 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.308934 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.408740 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts\") pod \"38f993aa-ed40-45f0-821f-e5d7f482ec99\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.408956 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf79k\" (UniqueName: \"kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k\") pod \"38f993aa-ed40-45f0-821f-e5d7f482ec99\" (UID: \"38f993aa-ed40-45f0-821f-e5d7f482ec99\") " Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.409375 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38f993aa-ed40-45f0-821f-e5d7f482ec99" (UID: "38f993aa-ed40-45f0-821f-e5d7f482ec99"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.414704 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k" (OuterVolumeSpecName: "kube-api-access-sf79k") pod "38f993aa-ed40-45f0-821f-e5d7f482ec99" (UID: "38f993aa-ed40-45f0-821f-e5d7f482ec99"). InnerVolumeSpecName "kube-api-access-sf79k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510037 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts\") pod \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510192 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wdw8\" (UniqueName: \"kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8\") pod \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\" (UID: \"8fa2662b-eed3-4461-ba5d-d4554ca4a22b\") " Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510481 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8fa2662b-eed3-4461-ba5d-d4554ca4a22b" (UID: "8fa2662b-eed3-4461-ba5d-d4554ca4a22b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510634 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf79k\" (UniqueName: \"kubernetes.io/projected/38f993aa-ed40-45f0-821f-e5d7f482ec99-kube-api-access-sf79k\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510691 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.510709 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38f993aa-ed40-45f0-821f-e5d7f482ec99-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.513335 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8" (OuterVolumeSpecName: "kube-api-access-7wdw8") pod "8fa2662b-eed3-4461-ba5d-d4554ca4a22b" (UID: "8fa2662b-eed3-4461-ba5d-d4554ca4a22b"). InnerVolumeSpecName "kube-api-access-7wdw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:42 crc kubenswrapper[4909]: I1128 16:36:42.612060 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wdw8\" (UniqueName: \"kubernetes.io/projected/8fa2662b-eed3-4461-ba5d-d4554ca4a22b-kube-api-access-7wdw8\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.002366 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.008530 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.122865 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts\") pod \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.122924 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts\") pod \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.122969 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fksp\" (UniqueName: \"kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp\") pod \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\" (UID: \"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4\") " Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.123025 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9sgf\" (UniqueName: \"kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf\") pod \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\" (UID: \"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f\") " Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.123395 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" (UID: "b6a50190-5f38-40b8-87b1-3e67fe7d3cf4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.124149 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.126980 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp" (OuterVolumeSpecName: "kube-api-access-9fksp") pod "b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" (UID: "b6a50190-5f38-40b8-87b1-3e67fe7d3cf4"). InnerVolumeSpecName "kube-api-access-9fksp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.127566 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf" (OuterVolumeSpecName: "kube-api-access-m9sgf") pod "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" (UID: "2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f"). InnerVolumeSpecName "kube-api-access-m9sgf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.224410 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.224450 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.224459 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fksp\" (UniqueName: \"kubernetes.io/projected/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4-kube-api-access-9fksp\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.224469 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9sgf\" (UniqueName: \"kubernetes.io/projected/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f-kube-api-access-m9sgf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.234849 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican9799-account-delete-z2p59" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.234852 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican9799-account-delete-z2p59" event={"ID":"8fa2662b-eed3-4461-ba5d-d4554ca4a22b","Type":"ContainerDied","Data":"94bf77be6b236b8999a881b64809cc5d3228f9ba7c55e297eb5bb70916649ff8"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.234935 4909 scope.go:117] "RemoveContainer" containerID="dddd51e21ee1da11fcdbb2f80b518588c94f66fdf38cd76c69d36bbd73a25213" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.240825 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapiab11-account-delete-2d7kv" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.241973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapiab11-account-delete-2d7kv" event={"ID":"38f993aa-ed40-45f0-821f-e5d7f482ec99","Type":"ContainerDied","Data":"b62786e4643f30fc830419e2c3f64fafab8b9e4690f1a7ab2286171bc0dd5d7a"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.246542 4909 generic.go:334] "Generic (PLEG): container finished" podID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" containerID="83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223" exitCode=137 Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.246622 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement3326-account-delete-6mrc7" event={"ID":"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f","Type":"ContainerDied","Data":"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.246682 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement3326-account-delete-6mrc7" event={"ID":"2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f","Type":"ContainerDied","Data":"abe14875a3d0a4033d9e1a171aa9a4afd425397efc4e742ac63ad06775ba8f35"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.246767 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement3326-account-delete-6mrc7" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.249240 4909 generic.go:334] "Generic (PLEG): container finished" podID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" containerID="a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c" exitCode=137 Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.249285 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell077a6-account-delete-dqcds" event={"ID":"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4","Type":"ContainerDied","Data":"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.249316 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell077a6-account-delete-dqcds" event={"ID":"b6a50190-5f38-40b8-87b1-3e67fe7d3cf4","Type":"ContainerDied","Data":"308974de15c3aa0ad5f3404fa69c6c81d13d1b6401da416491cfece3da4efade"} Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.250755 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell077a6-account-delete-dqcds" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.280796 4909 scope.go:117] "RemoveContainer" containerID="c85df760aafac3a038f71027a3c1a79c7e72fbde573d1a5e4dfa79b8700bbd6d" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.310363 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican9799-account-delete-z2p59"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.315953 4909 scope.go:117] "RemoveContainer" containerID="83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.336049 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican9799-account-delete-z2p59"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.348397 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.348699 4909 scope.go:117] "RemoveContainer" containerID="83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223" Nov 28 16:36:43 crc kubenswrapper[4909]: E1128 16:36:43.349414 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223\": container with ID starting with 83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223 not found: ID does not exist" containerID="83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.349481 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223"} err="failed to get container status \"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223\": rpc error: code = NotFound desc = could not find container \"83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223\": container with ID starting with 83480d25e4e745360a0b09a414270260f2f6fd2bc5ea6f0e7c2301194f75d223 not found: ID does not exist" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.349511 4909 scope.go:117] "RemoveContainer" containerID="a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.358175 4909 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/novaapiab11-account-delete-2d7kv"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.364633 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement3326-account-delete-6mrc7"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.369845 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement3326-account-delete-6mrc7"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.372219 4909 scope.go:117] "RemoveContainer" containerID="a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c" Nov 28 16:36:43 crc kubenswrapper[4909]: E1128 16:36:43.373251 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c\": container with ID starting with a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c not found: ID does not exist" containerID="a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.373306 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c"} err="failed to get container status \"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c\": rpc error: code = NotFound desc = could not find container \"a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c\": container with ID starting with a2a36a920755c46c32ca06509345b534a6606de3b3aa2f0a0f11b2df67f1283c not found: ID does not exist" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.374433 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.378676 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell077a6-account-delete-dqcds"] Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.918581 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" path="/var/lib/kubelet/pods/2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f/volumes" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.919627 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38f993aa-ed40-45f0-821f-e5d7f482ec99" path="/var/lib/kubelet/pods/38f993aa-ed40-45f0-821f-e5d7f482ec99/volumes" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.920722 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" path="/var/lib/kubelet/pods/8fa2662b-eed3-4461-ba5d-d4554ca4a22b/volumes" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.921754 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" path="/var/lib/kubelet/pods/b6a50190-5f38-40b8-87b1-3e67fe7d3cf4/volumes" Nov 28 16:36:43 crc kubenswrapper[4909]: I1128 16:36:43.924331 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" path="/var/lib/kubelet/pods/c2cc9842-6f8f-4afc-9895-2b7a75a9696c/volumes" Nov 28 16:37:19 crc kubenswrapper[4909]: I1128 16:37:19.910558 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 28 16:37:19 crc kubenswrapper[4909]: I1128 16:37:19.911107 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.679029 4909 scope.go:117] "RemoveContainer" containerID="8c0f8e8d2130828381ded896af12e6ddd0f48282a02278e34abc81a2ab38a850" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.714978 4909 scope.go:117] "RemoveContainer" containerID="f6d78d914a06108a8a1936303120febaccb4ca250cbf426cf7dfa81e792a33fc" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.754383 4909 scope.go:117] "RemoveContainer" containerID="89da003334cfba5cb7ceaf4db57edbaa578763aa0e7456486f5ae6a90f2ceb86" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.779153 4909 scope.go:117] "RemoveContainer" containerID="e6cc5cc31656fc880bbdc23b2063b2d818555caa73caf69f28bf8decd88df8aa" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.816604 4909 scope.go:117] "RemoveContainer" containerID="4d967a924bb9f3661cbaa046a03c44466b87bb582b5e43bc95fede216a25720e" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.834253 4909 scope.go:117] "RemoveContainer" containerID="ab680f3690017a9ac709a17864c1695750297aac86880cad19fbe1957e9e96cf" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.859312 4909 scope.go:117] "RemoveContainer" containerID="b15e6ee9a424c1a2201ed8acab650a198cf7fac5fbe51d039543b682ba20f5b0" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.884137 4909 scope.go:117] "RemoveContainer" containerID="ad3df5afbfd584781834b3bb93b0448fff4b3864ed8ef602a45c14c6bbecf55b" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.906406 4909 scope.go:117] "RemoveContainer" containerID="e54825a33d457bd02ac5f8d82396b60ba6d36114687450eaeb63fbb7ee2b93b3" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.936214 4909 scope.go:117] "RemoveContainer" containerID="285b7816902378eb6aafb34755baa7d6f7517f3859e339349c4092091bf2462d" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.953138 4909 scope.go:117] "RemoveContainer" containerID="221a2a3a02a2a8fb9553609d5de48c8286407e498f136ee370834da2d6b9d893" Nov 28 16:37:26 crc kubenswrapper[4909]: I1128 16:37:26.984322 4909 scope.go:117] "RemoveContainer" containerID="f7d9e81b5c5bad1caa908ec31490b084c28a10b2175c027689445d8ecc258612" Nov 28 16:37:27 crc kubenswrapper[4909]: I1128 16:37:27.015834 4909 scope.go:117] "RemoveContainer" containerID="9c0ed5eb3169a895fddc352403d70cbfcb4b590d019f84677427fdac6ec6cf71" Nov 28 16:37:27 crc kubenswrapper[4909]: I1128 16:37:27.036410 4909 scope.go:117] "RemoveContainer" containerID="d3f40b1a27b1ea548b445a46b92770d9795f4ed9429685b38f64c4173b4e0c3f" Nov 28 16:37:27 crc kubenswrapper[4909]: I1128 16:37:27.062916 4909 scope.go:117] "RemoveContainer" containerID="50a2302af69a187d0d31ed9cd28408c4635015a121cf1e21481bf48f95da59e6" Nov 28 16:37:27 crc kubenswrapper[4909]: I1128 16:37:27.090813 4909 scope.go:117] "RemoveContainer" containerID="e765eee4154b9a239a1ea2aab2c7f476eaed3191104b1030d1738062b69d97f7" Nov 28 16:37:27 crc kubenswrapper[4909]: I1128 16:37:27.119817 4909 scope.go:117] "RemoveContainer" containerID="312e25d9f0fde457f4417d6363e4360b0a69a29525131691497a7915ca3db2ab" Nov 28 16:37:49 crc kubenswrapper[4909]: I1128 16:37:49.910592 4909 
patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:37:49 crc kubenswrapper[4909]: I1128 16:37:49.911571 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:38:19 crc kubenswrapper[4909]: I1128 16:38:19.911539 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:38:19 crc kubenswrapper[4909]: I1128 16:38:19.912177 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:38:19 crc kubenswrapper[4909]: I1128 16:38:19.912220 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:38:19 crc kubenswrapper[4909]: I1128 16:38:19.912857 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:38:19 crc kubenswrapper[4909]: I1128 16:38:19.912903 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" gracePeriod=600 Nov 28 16:38:20 crc kubenswrapper[4909]: E1128 16:38:20.059071 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:38:20 crc kubenswrapper[4909]: I1128 16:38:20.223766 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" exitCode=0 Nov 28 16:38:20 crc kubenswrapper[4909]: I1128 16:38:20.223825 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"} Nov 28 
Nov 28 16:38:20 crc kubenswrapper[4909]: I1128 16:38:20.223873 4909 scope.go:117] "RemoveContainer" containerID="076c0f146f0317096c61ce94d56824e15d18793233a7703c2d57740fa454a4f9"
Nov 28 16:38:20 crc kubenswrapper[4909]: I1128 16:38:20.224470 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:38:20 crc kubenswrapper[4909]: E1128 16:38:20.224828 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.609198 4909 scope.go:117] "RemoveContainer" containerID="d40a4b6cfcdb1ace4ed94b638cf6d1b874b6ed7fea6b7915423222d760158ab8"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.640027 4909 scope.go:117] "RemoveContainer" containerID="ada4fc1a9bf251c75fd0b917cc3f6fe1006748ce560d0b4a79f1f22185d7e0ee"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.673683 4909 scope.go:117] "RemoveContainer" containerID="baaeeafb5f0ff35dc0eed801db85347a6019df3603c8d5fda752ecad50cc5dee"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.710388 4909 scope.go:117] "RemoveContainer" containerID="c3b9dc337a3ea52218200ac2c915acc7e5c688e21e83792557a9a417ce242639"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.740232 4909 scope.go:117] "RemoveContainer" containerID="77eec265c191d92518e9615ece2181462c2321d96a38aae2b1a5320db748c0bb"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.776540 4909 scope.go:117] "RemoveContainer" containerID="4b78e9d3d2edc4545741c20fd85514898c5bba454bef6b22b492f5f8a652b138"
Nov 28 16:38:27 crc kubenswrapper[4909]: I1128 16:38:27.819718 4909 scope.go:117] "RemoveContainer" containerID="ddaf1e784f7ded7f9c54a05ec17a7bae8623c22eea8fc597422047afc91451a0"
Nov 28 16:38:32 crc kubenswrapper[4909]: I1128 16:38:32.901771 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:38:32 crc kubenswrapper[4909]: E1128 16:38:32.903295 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:38:46 crc kubenswrapper[4909]: I1128 16:38:46.902813 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:38:46 crc kubenswrapper[4909]: E1128 16:38:46.903693 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:38:58 crc kubenswrapper[4909]: I1128 16:38:58.901458 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:38:58 crc kubenswrapper[4909]: E1128 16:38:58.903435 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:39:10 crc kubenswrapper[4909]: I1128 16:39:10.902209 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:39:10 crc kubenswrapper[4909]: E1128 16:39:10.903083 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:39:25 crc kubenswrapper[4909]: I1128 16:39:25.901350 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:39:25 crc kubenswrapper[4909]: E1128 16:39:25.902095 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:39:27 crc kubenswrapper[4909]: I1128 16:39:27.992984 4909 scope.go:117] "RemoveContainer" containerID="bb569642f57e937293a08525ea86da1ca995760deb6efcdbd4112c1b2e6df5c2"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.036012 4909 scope.go:117] "RemoveContainer" containerID="f12da04a1ce03bbd5f18972fc45e9a4b43b48df4a153d6335fb8de7e7922ae5b"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.083002 4909 scope.go:117] "RemoveContainer" containerID="c5b2101613b15e354dda8e19429c85ff82a7aa110a1c2928c61644354ffa188c"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.101026 4909 scope.go:117] "RemoveContainer" containerID="52e65448aef539353daa8f7db84d105cd71f70da4604565423fd5950afa26a6b"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.129643 4909 scope.go:117] "RemoveContainer" containerID="2dedb170588deaa6f11bf8d0e9ccb4ac0fb1f6ba18fbbaac5554659c70446bce"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.148425 4909 scope.go:117] "RemoveContainer" containerID="dbc15b80c1cd50c3f062d20e4bbfd0c4ab351bae72bf60617a069c1be00aaa4b"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.162179 4909 scope.go:117] "RemoveContainer" containerID="123e4bf1e8a307c46d39779dbbb6fd173da51f3bf9f94d9b59703bfb90dc1d0c"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.179650 4909 scope.go:117] "RemoveContainer" containerID="3cd8ac1736c6fbc1977f593e8c58c7c95ab9e0dac8a3505b8acefff70b5cfba5"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.199518 4909 scope.go:117] "RemoveContainer" containerID="6e1fc2641917ff4d1fab4358ecd7104b0ea53db894cd04eda884fbd1c239f5a1"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.216258 4909 scope.go:117] "RemoveContainer" containerID="61af0c4690ad10f578c11a1874dcaa8f66ff04b25238b214fda8321e55f07b14"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.249913 4909 scope.go:117] "RemoveContainer" containerID="c45af0789eb64b0332ed9444cb13a77731843941ac7d5caa108d91d85a7f2bfa"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.270128 4909 scope.go:117] "RemoveContainer" containerID="29d0bc179bbb27c3f9f6023ab4558b76e568b29b98ddf992b4f8391b462dd92d"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.287604 4909 scope.go:117] "RemoveContainer" containerID="444bf66121973198387c53d2fe4d567588df490dfe1e0393da998e6a7e899b67"
Nov 28 16:39:28 crc kubenswrapper[4909]: I1128 16:39:28.311422 4909 scope.go:117] "RemoveContainer" containerID="440e32fab867b54123f7d40ba325b3066e9c3cfb206e853ad32cc65a3e064bd4"
Nov 28 16:39:36 crc kubenswrapper[4909]: I1128 16:39:36.901267 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:39:36 crc kubenswrapper[4909]: E1128 16:39:36.903511 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:39:51 crc kubenswrapper[4909]: I1128 16:39:51.901901 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9"
Nov 28 16:39:51 crc kubenswrapper[4909]: E1128 16:39:51.902564 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589049 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6x9bb"]
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589540 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerName="nova-cell0-conductor-conductor"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589571 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerName="nova-cell0-conductor-conductor"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589604 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589624 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589646 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-notification-agent"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589697 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-notification-agent"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589751 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589766 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589790 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589812 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589839 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19094b17-f379-494e-b377-8191ddab4924" containerName="keystone-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589852 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="19094b17-f379-494e-b377-8191ddab4924" containerName="keystone-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589872 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589884 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589900 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589913 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589936 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589947 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.589970 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.589983 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590009 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590021 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590037 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590048 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590064 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590075 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590100 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590113 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590138 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="galera"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590151 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="galera"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590173 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-reaper"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590186 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-reaper"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590208 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590220 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590242 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590255 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590268 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerName="kube-state-metrics"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590281 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerName="kube-state-metrics"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590307 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590320 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590335 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="sg-core"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590347 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="sg-core"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590372 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-expirer"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590383 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-expirer"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590403 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590415 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590436 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="probe"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590452 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="probe"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590472 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590488 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590515 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="proxy-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590530 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="proxy-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590549 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590564 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590591 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-central-agent"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590607 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-central-agent"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590634 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="mysql-bootstrap"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590651 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="mysql-bootstrap"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590714 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="galera"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590731 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="galera"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590754 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590787 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590806 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="rabbitmq"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590821 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="rabbitmq"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590848 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590865 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590895 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590911 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590939 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-updater"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590955 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-updater"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.590975 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.590992 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591018 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="setup-container"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591036 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="setup-container"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591055 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591071 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591091 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-updater"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591106 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-updater"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591128 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f993aa-ed40-45f0-821f-e5d7f482ec99" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591144 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f993aa-ed40-45f0-821f-e5d7f482ec99" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591167 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591183 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591202 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591218 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591248 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591265 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591285 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591301 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-auditor"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591335 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591351 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591374 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="cinder-scheduler"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591390 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="cinder-scheduler"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591407 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="ovsdbserver-nb"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591423 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="ovsdbserver-nb"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591441 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591457 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591477 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591491 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591510 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b95945-6169-4e44-861a-f4abd48a7161" containerName="nova-scheduler-scheduler"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591527 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b95945-6169-4e44-861a-f4abd48a7161" containerName="nova-scheduler-scheduler"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591547 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="mysql-bootstrap"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591564 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="mysql-bootstrap"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591591 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591606 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-replicator"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591625 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591641 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591689 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b472e4d-2724-4ea4-93c9-5552d92af793" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591705 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b472e4d-2724-4ea4-93c9-5552d92af793" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591733 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerName="memcached"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591752 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerName="memcached"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591772 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591787 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591812 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591825 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591845 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591878 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591904 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerName="nova-cell1-conductor-conductor"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591922 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerName="nova-cell1-conductor-conductor"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591952 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.591966 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.591990 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592006 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592027 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592045 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592063 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="rsync"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592104 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="rsync"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592121 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592139 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592159 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bac06af4-bbe1-482a-8815-14a9cf2a1699" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592176 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bac06af4-bbe1-482a-8815-14a9cf2a1699" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592201 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="init"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592216 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="init"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592240 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server-init"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592256 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server-init"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592280 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592295 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" containerName="mariadb-account-delete"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592319 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592335 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592353 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="setup-container"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592369 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="setup-container"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592396 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592411 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="openstack-network-exporter"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592431 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="rabbitmq"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592445 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="rabbitmq"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592461 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592477 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592499 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592516 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592544 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592560 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592580 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592628 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592681 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="swift-recon-cron"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592698 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="swift-recon-cron"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592721 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592737 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592764 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592779 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592801 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="dnsmasq-dns"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592816 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="dnsmasq-dns"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592839 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592855 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-server"
Nov 28 16:39:53 crc kubenswrapper[4909]: E1128 16:39:53.592878 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-httpd"
Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.592893 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-httpd"
"RemoveStaleState removing state" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-metadata" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593369 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593389 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593409 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593428 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593460 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-updater" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593492 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b9a5c2-09a8-48fb-9e1b-b66c1003cf61" containerName="kube-state-metrics" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593517 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-auditor" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593546 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-expirer" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593575 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="rsync" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593604 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="cinder-scheduler" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593632 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e95a7b6-74fd-4db5-bb83-b8e8f80a698f" containerName="barbican-keystone-listener" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593699 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="ovn-northd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593721 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593751 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-replicator" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593775 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a8757b1-bbbf-4d00-9bbb-9f4bc855a9d9" containerName="galera" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593809 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eee3d9c-42a3-4d59-8a29-8d5f1cbc7444" containerName="rabbitmq" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593838 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="19094b17-f379-494e-b377-8191ddab4924" 
containerName="keystone-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593876 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b1d1797-999d-4453-b674-c40f53d4231e" containerName="galera" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593897 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593916 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-server" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593933 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c81b153-5498-4d63-9c98-fa8b79d5acdd" containerName="nova-api-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593964 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="38f993aa-ed40-45f0-821f-e5d7f482ec99" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.593989 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-server" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594008 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-reaper" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594029 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovs-vswitchd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594053 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f3cfb2-6884-4ef1-9844-cf494a2e21bb" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594079 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-httpd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594102 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c64b6821-6f46-4764-be55-97ed8c71fefa" containerName="nova-cell0-conductor-conductor" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594129 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594153 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594174 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e086b29e-c7fb-45a4-a6f2-c30508f1b25a" containerName="barbican-keystone-listener-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594199 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bac06af4-bbe1-482a-8815-14a9cf2a1699" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594226 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="febda67e-3daf-4cb4-9fd1-530d6c398404" containerName="neutron-httpd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594249 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-replicator" Nov 28 16:39:53 crc 
kubenswrapper[4909]: I1128 16:39:53.594272 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-server" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594292 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594313 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-central-agent" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594334 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="object-replicator" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594364 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594391 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="ceilometer-notification-agent" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594423 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7651107-0120-4611-87d0-be009f3749d7" containerName="placement-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594442 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="20c5e3ee-fe01-49c7-96fd-153897da815e" containerName="dnsmasq-dns" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594472 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="72f0e500-fe06-4373-9bc3-6cdaa2520043" containerName="memcached" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594490 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cfda3c9-a8dd-4a86-bb0d-d3b826b82a0f" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.594509 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a50190-5f38-40b8-87b1-3e67fe7d3cf4" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604692 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-server" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604739 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b97782ba-8bf0-4da9-bd81-97e88b4e73e7" containerName="glance-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604788 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="openstack-network-exporter" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604806 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffd60458-19af-464b-9649-57d25893f22a" containerName="barbican-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604837 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b7568e8-e3d4-4e06-a25f-33656bdf089f" containerName="probe" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604855 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="175903ef-59e0-4c1f-820f-bd3d2692462d" containerName="barbican-worker-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604881 4909 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-updater" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604908 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69e804c-fdc4-4b8f-86f3-d497612f42b8" containerName="openstack-network-exporter" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604925 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="sg-core" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604944 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604965 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2cc9842-6f8f-4afc-9895-2b7a75a9696c" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.604991 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="99398e49-db85-4878-b759-367747402c8b" containerName="barbican-worker" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605011 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa2662b-eed3-4461-ba5d-d4554ca4a22b" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605040 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff22194e-63a9-410d-80b6-9b1a1e68b164" containerName="ovsdb-server" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605056 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="account-auditor" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605079 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="swift-recon-cron" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605103 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8837df0-c6fe-42a6-bf0f-8ca14f1961a6" containerName="nova-metadata-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605131 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a3a5941-5c86-4a65-be1e-26327ca990ad" containerName="ovsdbserver-nb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605148 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc78dec8-567e-41a1-9fbf-793224410d3b" containerName="openstack-network-exporter" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605170 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2ea37c8-3213-4043-9da2-a9e76f9284e4" containerName="nova-cell1-conductor-conductor" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605193 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="02c83d05-a6ce-4c22-9015-91c0a766a518" containerName="rabbitmq" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605217 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="af476a0f-b390-443d-b7a5-14181e7c7bc7" containerName="container-auditor" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605239 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b472e4d-2724-4ea4-93c9-5552d92af793" containerName="mariadb-account-delete" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605257 4909 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="e792abf7-967c-4293-b5f4-f073b07c8cf1" containerName="proxy-httpd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605284 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="44195e2b-7f1d-4542-8948-93a818071fd2" containerName="proxy-httpd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605311 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="14b66e32-a660-4643-9f57-f66bf12a56ef" containerName="ovn-controller" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605353 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ad0a326-11b9-40c8-b251-5994a436110a" containerName="barbican-api" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605382 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="287e7e9a-0240-478e-a15b-b01122e79c32" containerName="cinder-api-log" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.605407 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00913f80-f496-44ec-a619-99129724cb89" containerName="glance-httpd" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.609649 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6x9bb"] Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.609890 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.727060 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtwkb\" (UniqueName: \"kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.727477 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.727515 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.828427 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.828489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.828543 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtwkb\" (UniqueName: \"kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.829166 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.829181 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.853994 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtwkb\" (UniqueName: \"kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb\") pod \"community-operators-6x9bb\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:53 crc kubenswrapper[4909]: I1128 16:39:53.956268 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:39:54 crc kubenswrapper[4909]: I1128 16:39:54.246460 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6x9bb"] Nov 28 16:39:55 crc kubenswrapper[4909]: I1128 16:39:55.080979 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerID="ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5" exitCode=0 Nov 28 16:39:55 crc kubenswrapper[4909]: I1128 16:39:55.081040 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerDied","Data":"ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5"} Nov 28 16:39:55 crc kubenswrapper[4909]: I1128 16:39:55.081293 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerStarted","Data":"5f77c3078943d1d06b41b0e7c7667a59c1acd509c277c6651d89f33107fabd34"} Nov 28 16:39:55 crc kubenswrapper[4909]: I1128 16:39:55.082912 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:39:57 crc kubenswrapper[4909]: I1128 16:39:57.097038 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerID="f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f" exitCode=0 Nov 28 16:39:57 crc kubenswrapper[4909]: I1128 16:39:57.097371 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerDied","Data":"f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f"} Nov 28 16:39:59 crc 
kubenswrapper[4909]: I1128 16:39:59.116976 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerStarted","Data":"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b"} Nov 28 16:40:03 crc kubenswrapper[4909]: I1128 16:40:03.956498 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:03 crc kubenswrapper[4909]: I1128 16:40:03.956895 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:03 crc kubenswrapper[4909]: I1128 16:40:03.999516 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:04 crc kubenswrapper[4909]: I1128 16:40:04.013926 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6x9bb" podStartSLOduration=8.182616154 podStartE2EDuration="11.013911724s" podCreationTimestamp="2025-11-28 16:39:53 +0000 UTC" firstStartedPulling="2025-11-28 16:39:55.082623537 +0000 UTC m=+1777.479308071" lastFinishedPulling="2025-11-28 16:39:57.913919077 +0000 UTC m=+1780.310603641" observedRunningTime="2025-11-28 16:39:59.143102482 +0000 UTC m=+1781.539787006" watchObservedRunningTime="2025-11-28 16:40:04.013911724 +0000 UTC m=+1786.410596248" Nov 28 16:40:04 crc kubenswrapper[4909]: I1128 16:40:04.195915 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:04 crc kubenswrapper[4909]: I1128 16:40:04.239702 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6x9bb"] Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.170657 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6x9bb" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="registry-server" containerID="cri-o://5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b" gracePeriod=2 Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.604535 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.711126 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtwkb\" (UniqueName: \"kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb\") pod \"e8fcce10-4507-490f-b3bf-0870b91ab416\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.711237 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content\") pod \"e8fcce10-4507-490f-b3bf-0870b91ab416\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.711331 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities\") pod \"e8fcce10-4507-490f-b3bf-0870b91ab416\" (UID: \"e8fcce10-4507-490f-b3bf-0870b91ab416\") " Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.713186 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities" (OuterVolumeSpecName: "utilities") pod "e8fcce10-4507-490f-b3bf-0870b91ab416" (UID: "e8fcce10-4507-490f-b3bf-0870b91ab416"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.716627 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb" (OuterVolumeSpecName: "kube-api-access-vtwkb") pod "e8fcce10-4507-490f-b3bf-0870b91ab416" (UID: "e8fcce10-4507-490f-b3bf-0870b91ab416"). InnerVolumeSpecName "kube-api-access-vtwkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.813608 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.813663 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtwkb\" (UniqueName: \"kubernetes.io/projected/e8fcce10-4507-490f-b3bf-0870b91ab416-kube-api-access-vtwkb\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.848812 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8fcce10-4507-490f-b3bf-0870b91ab416" (UID: "e8fcce10-4507-490f-b3bf-0870b91ab416"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.901880 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:40:06 crc kubenswrapper[4909]: E1128 16:40:06.902163 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:40:06 crc kubenswrapper[4909]: I1128 16:40:06.915409 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8fcce10-4507-490f-b3bf-0870b91ab416-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.180759 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerID="5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b" exitCode=0 Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.180817 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6x9bb" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.180838 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerDied","Data":"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b"} Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.181296 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6x9bb" event={"ID":"e8fcce10-4507-490f-b3bf-0870b91ab416","Type":"ContainerDied","Data":"5f77c3078943d1d06b41b0e7c7667a59c1acd509c277c6651d89f33107fabd34"} Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.181312 4909 scope.go:117] "RemoveContainer" containerID="5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.200445 4909 scope.go:117] "RemoveContainer" containerID="f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.217394 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6x9bb"] Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.225472 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6x9bb"] Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.236784 4909 scope.go:117] "RemoveContainer" containerID="ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.258513 4909 scope.go:117] "RemoveContainer" containerID="5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b" Nov 28 16:40:07 crc kubenswrapper[4909]: E1128 16:40:07.258908 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b\": container with ID starting with 5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b not found: ID does not exist" 
containerID="5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.258934 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b"} err="failed to get container status \"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b\": rpc error: code = NotFound desc = could not find container \"5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b\": container with ID starting with 5be963cd4742aa63c74fd70d723c180faa0f464b3c068e516a5e589346076f4b not found: ID does not exist" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.258961 4909 scope.go:117] "RemoveContainer" containerID="f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f" Nov 28 16:40:07 crc kubenswrapper[4909]: E1128 16:40:07.259304 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f\": container with ID starting with f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f not found: ID does not exist" containerID="f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.259327 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f"} err="failed to get container status \"f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f\": rpc error: code = NotFound desc = could not find container \"f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f\": container with ID starting with f458e562839d8e5afcdee8bc2ebf7b2a4e5c5426c1fec77b038e126565659e6f not found: ID does not exist" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.259343 4909 scope.go:117] "RemoveContainer" containerID="ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5" Nov 28 16:40:07 crc kubenswrapper[4909]: E1128 16:40:07.259823 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5\": container with ID starting with ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5 not found: ID does not exist" containerID="ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.259840 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5"} err="failed to get container status \"ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5\": rpc error: code = NotFound desc = could not find container \"ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5\": container with ID starting with ace8b0be4a571d20d615ab73708faf6bb517adcb9b357673f9650cf6fe93ebd5 not found: ID does not exist" Nov 28 16:40:07 crc kubenswrapper[4909]: I1128 16:40:07.909915 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" path="/var/lib/kubelet/pods/e8fcce10-4507-490f-b3bf-0870b91ab416/volumes" Nov 28 16:40:20 crc kubenswrapper[4909]: I1128 16:40:20.901884 4909 scope.go:117] "RemoveContainer" 
containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:40:20 crc kubenswrapper[4909]: E1128 16:40:20.902643 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.493185 4909 scope.go:117] "RemoveContainer" containerID="ce2426fd0deaef85cce4c6ed203e672ca1bccd2feef4b79a7cc06bd0d0e935c9" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.541356 4909 scope.go:117] "RemoveContainer" containerID="87c3eb1a8e998586487a23e05591adebf7a28f64c15f5f300ef14bc6f7e03672" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.565724 4909 scope.go:117] "RemoveContainer" containerID="0d48820f5d21bc197eb801dacd5dc3118d806007250e25469dbf27b1b1789b47" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.591050 4909 scope.go:117] "RemoveContainer" containerID="71ea8b09eb6024bffef6a00074f38edb5df62a42149d93f74d218f410d00292c" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.611063 4909 scope.go:117] "RemoveContainer" containerID="b06f7ceb8721d82fd465811ed29d9a70387b3cc0eee40cd66aaef773726c983a" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.651283 4909 scope.go:117] "RemoveContainer" containerID="5c1abe607c19c7135c249b2516147df7ded4fcccb8eff97993dd74c4f627e143" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.669143 4909 scope.go:117] "RemoveContainer" containerID="3a9032519f8e9fe424e691068a12060322948da1fc696b223487c23fb4a7c0eb" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.684807 4909 scope.go:117] "RemoveContainer" containerID="e6e645fabdf72540bad71659d62cc61d550d5131f071ac02e662ebfb69991f04" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.705583 4909 scope.go:117] "RemoveContainer" containerID="b74657ce0e6f1d6aebf3eb081305b8dbfaf43ab222ea5e2a386baad9203784be" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.735182 4909 scope.go:117] "RemoveContainer" containerID="1b0631590dbf2f02faff4199c7f351befddd6177ddde8165eb64c85e0c20b740" Nov 28 16:40:28 crc kubenswrapper[4909]: I1128 16:40:28.754201 4909 scope.go:117] "RemoveContainer" containerID="25149522bc5abac18b8b5b177459e0d1b3747e94a6872d2b3d9ef9b026c8cf52" Nov 28 16:40:32 crc kubenswrapper[4909]: I1128 16:40:32.901856 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:40:32 crc kubenswrapper[4909]: E1128 16:40:32.902567 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:40:43 crc kubenswrapper[4909]: I1128 16:40:43.902041 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:40:43 crc kubenswrapper[4909]: E1128 16:40:43.902805 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:40:57 crc kubenswrapper[4909]: I1128 16:40:57.908740 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:40:57 crc kubenswrapper[4909]: E1128 16:40:57.909696 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:41:11 crc kubenswrapper[4909]: I1128 16:41:11.901920 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:41:11 crc kubenswrapper[4909]: E1128 16:41:11.902799 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:41:25 crc kubenswrapper[4909]: I1128 16:41:25.260201 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:41:25 crc kubenswrapper[4909]: E1128 16:41:25.261101 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:41:28 crc kubenswrapper[4909]: I1128 16:41:28.871825 4909 scope.go:117] "RemoveContainer" containerID="bd5449498d8b191c04307f37b3bba2f646c9d3de3fd836f283ecb8d81e786377" Nov 28 16:41:28 crc kubenswrapper[4909]: I1128 16:41:28.901832 4909 scope.go:117] "RemoveContainer" containerID="49028e9be7659ad35e5108e0a2c128957971bb8f2381bd1914434307ad430e5b" Nov 28 16:41:28 crc kubenswrapper[4909]: I1128 16:41:28.971504 4909 scope.go:117] "RemoveContainer" containerID="edce0e3a1b79a461c6e384cfefd0cbf3c0e7f50280e3c51aafc961a31f14493c" Nov 28 16:41:28 crc kubenswrapper[4909]: I1128 16:41:28.998689 4909 scope.go:117] "RemoveContainer" containerID="bcd3a169e67b44354a85ac02fdf79896704f5e85915fcde17e813b5bf5c5d5ac" Nov 28 16:41:38 crc kubenswrapper[4909]: I1128 16:41:38.901425 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:41:38 crc kubenswrapper[4909]: E1128 16:41:38.902970 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:41:53 crc kubenswrapper[4909]: I1128 16:41:53.902355 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:41:53 crc kubenswrapper[4909]: E1128 16:41:53.903370 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:42:08 crc kubenswrapper[4909]: I1128 16:42:08.902467 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:42:08 crc kubenswrapper[4909]: E1128 16:42:08.903481 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:42:21 crc kubenswrapper[4909]: I1128 16:42:21.902278 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:42:21 crc kubenswrapper[4909]: E1128 16:42:21.902938 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:42:29 crc kubenswrapper[4909]: I1128 16:42:29.058894 4909 scope.go:117] "RemoveContainer" containerID="0ffd2c461bc2721d0397f08b5b47716138419e6b87145bbcd26504ee193d270a" Nov 28 16:42:29 crc kubenswrapper[4909]: I1128 16:42:29.082553 4909 scope.go:117] "RemoveContainer" containerID="85bc892f485ead62ff7a3454236fc204edfbf282d2ecb39184a891c168d3bf1b" Nov 28 16:42:29 crc kubenswrapper[4909]: I1128 16:42:29.114385 4909 scope.go:117] "RemoveContainer" containerID="8ac297b5d405d834d8680a5f9f28838b752040016567cfba2f20902c9525c643" Nov 28 16:42:29 crc kubenswrapper[4909]: I1128 16:42:29.143356 4909 scope.go:117] "RemoveContainer" containerID="fd84d749d965cf5cee571a2a5d5cf252fed713747b600b28bdfecaeac7232d7b" Nov 28 16:42:29 crc kubenswrapper[4909]: I1128 16:42:29.160030 4909 scope.go:117] "RemoveContainer" containerID="ef94b6682122c37e874d2c9670f4f10b050ca9f317d02f480fb72e2a87ccec9c" Nov 28 16:42:33 crc kubenswrapper[4909]: I1128 16:42:33.901400 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:42:33 crc kubenswrapper[4909]: E1128 16:42:33.901962 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:42:48 crc kubenswrapper[4909]: I1128 16:42:48.902082 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:42:48 crc kubenswrapper[4909]: E1128 16:42:48.903055 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:43:00 crc kubenswrapper[4909]: I1128 16:43:00.901822 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:43:00 crc kubenswrapper[4909]: E1128 16:43:00.902693 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:43:14 crc kubenswrapper[4909]: I1128 16:43:14.901389 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:43:14 crc kubenswrapper[4909]: E1128 16:43:14.902285 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:43:29 crc kubenswrapper[4909]: I1128 16:43:29.902106 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:43:30 crc kubenswrapper[4909]: I1128 16:43:30.745604 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148"} Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.885079 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:43:54 crc kubenswrapper[4909]: E1128 16:43:54.885936 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="registry-server" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.885949 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="registry-server" Nov 28 16:43:54 crc kubenswrapper[4909]: E1128 16:43:54.885967 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" 
containerName="extract-utilities" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.885976 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="extract-utilities" Nov 28 16:43:54 crc kubenswrapper[4909]: E1128 16:43:54.885990 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="extract-content" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.885997 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="extract-content" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.886129 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8fcce10-4507-490f-b3bf-0870b91ab416" containerName="registry-server" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.887358 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.904265 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.983306 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.983372 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x68wk\" (UniqueName: \"kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:54 crc kubenswrapper[4909]: I1128 16:43:54.983426 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.085179 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.085228 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x68wk\" (UniqueName: \"kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.085270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " 
pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.085760 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.085795 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.113039 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x68wk\" (UniqueName: \"kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk\") pod \"redhat-operators-qgxfs\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.225943 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.659201 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.998039 4909 generic.go:334] "Generic (PLEG): container finished" podID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerID="e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9" exitCode=0 Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.998096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerDied","Data":"e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9"} Nov 28 16:43:55 crc kubenswrapper[4909]: I1128 16:43:55.998153 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerStarted","Data":"851229bd4bf6a1733a718c67657fbf828b83c429c14e75bd195df29bdfcf2eba"} Nov 28 16:43:57 crc kubenswrapper[4909]: I1128 16:43:57.009463 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerStarted","Data":"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866"} Nov 28 16:43:58 crc kubenswrapper[4909]: I1128 16:43:58.024956 4909 generic.go:334] "Generic (PLEG): container finished" podID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerID="6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866" exitCode=0 Nov 28 16:43:58 crc kubenswrapper[4909]: I1128 16:43:58.025015 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerDied","Data":"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866"} Nov 28 16:43:59 crc kubenswrapper[4909]: I1128 16:43:59.035270 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" 
event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerStarted","Data":"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d"} Nov 28 16:43:59 crc kubenswrapper[4909]: I1128 16:43:59.060545 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qgxfs" podStartSLOduration=2.427988053 podStartE2EDuration="5.060527754s" podCreationTimestamp="2025-11-28 16:43:54 +0000 UTC" firstStartedPulling="2025-11-28 16:43:56.000456378 +0000 UTC m=+2018.397140902" lastFinishedPulling="2025-11-28 16:43:58.632996039 +0000 UTC m=+2021.029680603" observedRunningTime="2025-11-28 16:43:59.051101381 +0000 UTC m=+2021.447785915" watchObservedRunningTime="2025-11-28 16:43:59.060527754 +0000 UTC m=+2021.457212278" Nov 28 16:44:05 crc kubenswrapper[4909]: I1128 16:44:05.226704 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:05 crc kubenswrapper[4909]: I1128 16:44:05.227325 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:05 crc kubenswrapper[4909]: I1128 16:44:05.277436 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:06 crc kubenswrapper[4909]: I1128 16:44:06.128357 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:06 crc kubenswrapper[4909]: I1128 16:44:06.167412 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:44:08 crc kubenswrapper[4909]: I1128 16:44:08.104889 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qgxfs" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="registry-server" containerID="cri-o://183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d" gracePeriod=2 Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.613281 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.715650 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x68wk\" (UniqueName: \"kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk\") pod \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.715795 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content\") pod \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.715917 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities\") pod \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\" (UID: \"0bec1bba-afa5-44af-8fd0-ae772b895c7e\") " Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.717309 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities" (OuterVolumeSpecName: "utilities") pod "0bec1bba-afa5-44af-8fd0-ae772b895c7e" (UID: "0bec1bba-afa5-44af-8fd0-ae772b895c7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.724002 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk" (OuterVolumeSpecName: "kube-api-access-x68wk") pod "0bec1bba-afa5-44af-8fd0-ae772b895c7e" (UID: "0bec1bba-afa5-44af-8fd0-ae772b895c7e"). InnerVolumeSpecName "kube-api-access-x68wk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.817670 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.817703 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x68wk\" (UniqueName: \"kubernetes.io/projected/0bec1bba-afa5-44af-8fd0-ae772b895c7e-kube-api-access-x68wk\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.827696 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0bec1bba-afa5-44af-8fd0-ae772b895c7e" (UID: "0bec1bba-afa5-44af-8fd0-ae772b895c7e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:44:09 crc kubenswrapper[4909]: I1128 16:44:09.919172 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bec1bba-afa5-44af-8fd0-ae772b895c7e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.124457 4909 generic.go:334] "Generic (PLEG): container finished" podID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerID="183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d" exitCode=0 Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.124531 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgxfs" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.124521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerDied","Data":"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d"} Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.124701 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgxfs" event={"ID":"0bec1bba-afa5-44af-8fd0-ae772b895c7e","Type":"ContainerDied","Data":"851229bd4bf6a1733a718c67657fbf828b83c429c14e75bd195df29bdfcf2eba"} Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.124734 4909 scope.go:117] "RemoveContainer" containerID="183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.158288 4909 scope.go:117] "RemoveContainer" containerID="6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.161126 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.171672 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qgxfs"] Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.179099 4909 scope.go:117] "RemoveContainer" containerID="e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.201721 4909 scope.go:117] "RemoveContainer" containerID="183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d" Nov 28 16:44:10 crc kubenswrapper[4909]: E1128 16:44:10.202164 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d\": container with ID starting with 183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d not found: ID does not exist" containerID="183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.202215 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d"} err="failed to get container status \"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d\": rpc error: code = NotFound desc = could not find container \"183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d\": container with ID starting with 183ddd64f7bf978df6d14e067b86ea9c19d1e48f3ce5cced5e0de0b295cd5b3d not found: ID does not exist" Nov 28 16:44:10 crc 
kubenswrapper[4909]: I1128 16:44:10.202245 4909 scope.go:117] "RemoveContainer" containerID="6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866" Nov 28 16:44:10 crc kubenswrapper[4909]: E1128 16:44:10.202533 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866\": container with ID starting with 6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866 not found: ID does not exist" containerID="6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.202610 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866"} err="failed to get container status \"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866\": rpc error: code = NotFound desc = could not find container \"6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866\": container with ID starting with 6a169c7c24e78a3188fcef8b93ff27d1d2170918633eaf29575fbf1cace21866 not found: ID does not exist" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.202720 4909 scope.go:117] "RemoveContainer" containerID="e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9" Nov 28 16:44:10 crc kubenswrapper[4909]: E1128 16:44:10.203040 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9\": container with ID starting with e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9 not found: ID does not exist" containerID="e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9" Nov 28 16:44:10 crc kubenswrapper[4909]: I1128 16:44:10.203070 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9"} err="failed to get container status \"e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9\": rpc error: code = NotFound desc = could not find container \"e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9\": container with ID starting with e3ff20699b98811eb9d38f168f9c45b99e1dd16e8aa187d62899636ce54a12b9 not found: ID does not exist" Nov 28 16:44:11 crc kubenswrapper[4909]: I1128 16:44:11.918334 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" path="/var/lib/kubelet/pods/0bec1bba-afa5-44af-8fd0-ae772b895c7e/volumes" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.348734 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:29 crc kubenswrapper[4909]: E1128 16:44:29.349747 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="extract-content" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.349764 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="extract-content" Nov 28 16:44:29 crc kubenswrapper[4909]: E1128 16:44:29.349779 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="registry-server" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.349788 4909 
state_mem.go:107] "Deleted CPUSet assignment" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="registry-server" Nov 28 16:44:29 crc kubenswrapper[4909]: E1128 16:44:29.349798 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="extract-utilities" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.349806 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="extract-utilities" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.349978 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bec1bba-afa5-44af-8fd0-ae772b895c7e" containerName="registry-server" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.353197 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.371684 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.468131 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.468217 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.468256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkbjm\" (UniqueName: \"kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.569332 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.569441 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.569485 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkbjm\" (UniqueName: \"kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc 
kubenswrapper[4909]: I1128 16:44:29.570391 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.570393 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.589747 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkbjm\" (UniqueName: \"kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm\") pod \"certified-operators-95brm\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:29 crc kubenswrapper[4909]: I1128 16:44:29.729766 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:30 crc kubenswrapper[4909]: I1128 16:44:30.205625 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:30 crc kubenswrapper[4909]: I1128 16:44:30.294727 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerStarted","Data":"3acd8cea45e6a88e75c25acfdc32a6e32fff971452917e06fb5bc87a165f35ba"} Nov 28 16:44:31 crc kubenswrapper[4909]: I1128 16:44:31.304107 4909 generic.go:334] "Generic (PLEG): container finished" podID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerID="878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043" exitCode=0 Nov 28 16:44:31 crc kubenswrapper[4909]: I1128 16:44:31.304158 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerDied","Data":"878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043"} Nov 28 16:44:32 crc kubenswrapper[4909]: I1128 16:44:32.314604 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerStarted","Data":"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0"} Nov 28 16:44:33 crc kubenswrapper[4909]: I1128 16:44:33.326416 4909 generic.go:334] "Generic (PLEG): container finished" podID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerID="ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0" exitCode=0 Nov 28 16:44:33 crc kubenswrapper[4909]: I1128 16:44:33.326489 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerDied","Data":"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0"} Nov 28 16:44:34 crc kubenswrapper[4909]: I1128 16:44:34.335363 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" 
event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerStarted","Data":"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3"} Nov 28 16:44:34 crc kubenswrapper[4909]: I1128 16:44:34.357297 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-95brm" podStartSLOduration=2.923435857 podStartE2EDuration="5.357271724s" podCreationTimestamp="2025-11-28 16:44:29 +0000 UTC" firstStartedPulling="2025-11-28 16:44:31.306812006 +0000 UTC m=+2053.703496520" lastFinishedPulling="2025-11-28 16:44:33.740647863 +0000 UTC m=+2056.137332387" observedRunningTime="2025-11-28 16:44:34.351030057 +0000 UTC m=+2056.747714591" watchObservedRunningTime="2025-11-28 16:44:34.357271724 +0000 UTC m=+2056.753956248" Nov 28 16:44:39 crc kubenswrapper[4909]: I1128 16:44:39.731107 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:39 crc kubenswrapper[4909]: I1128 16:44:39.731767 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:39 crc kubenswrapper[4909]: I1128 16:44:39.767092 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:40 crc kubenswrapper[4909]: I1128 16:44:40.451831 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:40 crc kubenswrapper[4909]: I1128 16:44:40.506948 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.407243 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-95brm" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="registry-server" containerID="cri-o://ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3" gracePeriod=2 Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.958361 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.967000 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities\") pod \"d3ad8384-7d0b-4022-86e0-af31edea53bc\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.967050 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content\") pod \"d3ad8384-7d0b-4022-86e0-af31edea53bc\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.967124 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkbjm\" (UniqueName: \"kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm\") pod \"d3ad8384-7d0b-4022-86e0-af31edea53bc\" (UID: \"d3ad8384-7d0b-4022-86e0-af31edea53bc\") " Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.971870 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities" (OuterVolumeSpecName: "utilities") pod "d3ad8384-7d0b-4022-86e0-af31edea53bc" (UID: "d3ad8384-7d0b-4022-86e0-af31edea53bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:44:42 crc kubenswrapper[4909]: I1128 16:44:42.975253 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm" (OuterVolumeSpecName: "kube-api-access-wkbjm") pod "d3ad8384-7d0b-4022-86e0-af31edea53bc" (UID: "d3ad8384-7d0b-4022-86e0-af31edea53bc"). InnerVolumeSpecName "kube-api-access-wkbjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.053478 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3ad8384-7d0b-4022-86e0-af31edea53bc" (UID: "d3ad8384-7d0b-4022-86e0-af31edea53bc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.068582 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.068640 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ad8384-7d0b-4022-86e0-af31edea53bc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.068679 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkbjm\" (UniqueName: \"kubernetes.io/projected/d3ad8384-7d0b-4022-86e0-af31edea53bc-kube-api-access-wkbjm\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.419428 4909 generic.go:334] "Generic (PLEG): container finished" podID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerID="ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3" exitCode=0 Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.419475 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerDied","Data":"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3"} Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.419512 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95brm" event={"ID":"d3ad8384-7d0b-4022-86e0-af31edea53bc","Type":"ContainerDied","Data":"3acd8cea45e6a88e75c25acfdc32a6e32fff971452917e06fb5bc87a165f35ba"} Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.419530 4909 scope.go:117] "RemoveContainer" containerID="ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.419585 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-95brm" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.448285 4909 scope.go:117] "RemoveContainer" containerID="ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.470681 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.480890 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-95brm"] Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.493072 4909 scope.go:117] "RemoveContainer" containerID="878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.529343 4909 scope.go:117] "RemoveContainer" containerID="ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3" Nov 28 16:44:43 crc kubenswrapper[4909]: E1128 16:44:43.529978 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3\": container with ID starting with ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3 not found: ID does not exist" containerID="ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.530040 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3"} err="failed to get container status \"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3\": rpc error: code = NotFound desc = could not find container \"ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3\": container with ID starting with ebc250992f6e34c8475fcb129e52636f899d471e4de76c3deb2f36f4d2f8daa3 not found: ID does not exist" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.530082 4909 scope.go:117] "RemoveContainer" containerID="ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0" Nov 28 16:44:43 crc kubenswrapper[4909]: E1128 16:44:43.530568 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0\": container with ID starting with ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0 not found: ID does not exist" containerID="ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.530839 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0"} err="failed to get container status \"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0\": rpc error: code = NotFound desc = could not find container \"ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0\": container with ID starting with ab8620fde0f32f2eda51ded2addb3e45c21d829735b8a06fc346c4a750c924e0 not found: ID does not exist" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.530929 4909 scope.go:117] "RemoveContainer" containerID="878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043" Nov 28 16:44:43 crc kubenswrapper[4909]: E1128 16:44:43.531272 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043\": container with ID starting with 878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043 not found: ID does not exist" containerID="878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.531315 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043"} err="failed to get container status \"878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043\": rpc error: code = NotFound desc = could not find container \"878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043\": container with ID starting with 878a02eebf28a77ba8d60c1cbd8e0a5d0f842dc5353225fddcbf13de1361e043 not found: ID does not exist" Nov 28 16:44:43 crc kubenswrapper[4909]: I1128 16:44:43.931191 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" path="/var/lib/kubelet/pods/d3ad8384-7d0b-4022-86e0-af31edea53bc/volumes" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.150312 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w"] Nov 28 16:45:00 crc kubenswrapper[4909]: E1128 16:45:00.151241 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="extract-utilities" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.151257 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="extract-utilities" Nov 28 16:45:00 crc kubenswrapper[4909]: E1128 16:45:00.151286 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.151294 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4909]: E1128 16:45:00.151312 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="extract-content" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.151319 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="extract-content" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.151496 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3ad8384-7d0b-4022-86e0-af31edea53bc" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.152113 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.154242 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.154479 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.165198 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w"] Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.328748 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.329246 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p58x9\" (UniqueName: \"kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.329496 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.431277 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p58x9\" (UniqueName: \"kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.431419 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.431541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.433132 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume\") pod 
\"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.438092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.460521 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p58x9\" (UniqueName: \"kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9\") pod \"collect-profiles-29405805-44l8w\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.472307 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:00 crc kubenswrapper[4909]: I1128 16:45:00.927524 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w"] Nov 28 16:45:01 crc kubenswrapper[4909]: I1128 16:45:01.562029 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" event={"ID":"54925a18-3574-4cf3-b36d-6f12d9c424fd","Type":"ContainerStarted","Data":"a50f15cd57fdcfb7048afc764a560ed1849b7c7ceacb03a0d441ca42826aa9b2"} Nov 28 16:45:01 crc kubenswrapper[4909]: I1128 16:45:01.562397 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" event={"ID":"54925a18-3574-4cf3-b36d-6f12d9c424fd","Type":"ContainerStarted","Data":"44542d66bbdc25e3dc6c87245e3f2f0b3bbd420aeae021eb1ce983ccc99cc69a"} Nov 28 16:45:01 crc kubenswrapper[4909]: I1128 16:45:01.582116 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" podStartSLOduration=1.582101684 podStartE2EDuration="1.582101684s" podCreationTimestamp="2025-11-28 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:45:01.579083333 +0000 UTC m=+2083.975767877" watchObservedRunningTime="2025-11-28 16:45:01.582101684 +0000 UTC m=+2083.978786208" Nov 28 16:45:02 crc kubenswrapper[4909]: I1128 16:45:02.570194 4909 generic.go:334] "Generic (PLEG): container finished" podID="54925a18-3574-4cf3-b36d-6f12d9c424fd" containerID="a50f15cd57fdcfb7048afc764a560ed1849b7c7ceacb03a0d441ca42826aa9b2" exitCode=0 Nov 28 16:45:02 crc kubenswrapper[4909]: I1128 16:45:02.570253 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" event={"ID":"54925a18-3574-4cf3-b36d-6f12d9c424fd","Type":"ContainerDied","Data":"a50f15cd57fdcfb7048afc764a560ed1849b7c7ceacb03a0d441ca42826aa9b2"} Nov 28 16:45:03 crc kubenswrapper[4909]: I1128 16:45:03.918169 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.081199 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p58x9\" (UniqueName: \"kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9\") pod \"54925a18-3574-4cf3-b36d-6f12d9c424fd\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.081742 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume\") pod \"54925a18-3574-4cf3-b36d-6f12d9c424fd\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.082007 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume\") pod \"54925a18-3574-4cf3-b36d-6f12d9c424fd\" (UID: \"54925a18-3574-4cf3-b36d-6f12d9c424fd\") " Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.082834 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume" (OuterVolumeSpecName: "config-volume") pod "54925a18-3574-4cf3-b36d-6f12d9c424fd" (UID: "54925a18-3574-4cf3-b36d-6f12d9c424fd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.083283 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54925a18-3574-4cf3-b36d-6f12d9c424fd-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.092332 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "54925a18-3574-4cf3-b36d-6f12d9c424fd" (UID: "54925a18-3574-4cf3-b36d-6f12d9c424fd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.095856 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9" (OuterVolumeSpecName: "kube-api-access-p58x9") pod "54925a18-3574-4cf3-b36d-6f12d9c424fd" (UID: "54925a18-3574-4cf3-b36d-6f12d9c424fd"). InnerVolumeSpecName "kube-api-access-p58x9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.184957 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p58x9\" (UniqueName: \"kubernetes.io/projected/54925a18-3574-4cf3-b36d-6f12d9c424fd-kube-api-access-p58x9\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.185010 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54925a18-3574-4cf3-b36d-6f12d9c424fd-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.586928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" event={"ID":"54925a18-3574-4cf3-b36d-6f12d9c424fd","Type":"ContainerDied","Data":"44542d66bbdc25e3dc6c87245e3f2f0b3bbd420aeae021eb1ce983ccc99cc69a"} Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.586987 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44542d66bbdc25e3dc6c87245e3f2f0b3bbd420aeae021eb1ce983ccc99cc69a" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.586987 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w" Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.650053 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"] Nov 28 16:45:04 crc kubenswrapper[4909]: I1128 16:45:04.655023 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-qjm9k"] Nov 28 16:45:05 crc kubenswrapper[4909]: I1128 16:45:05.913438 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0731ea3e-3015-4311-88f4-bcd1f9f8204e" path="/var/lib/kubelet/pods/0731ea3e-3015-4311-88f4-bcd1f9f8204e/volumes" Nov 28 16:45:29 crc kubenswrapper[4909]: I1128 16:45:29.336219 4909 scope.go:117] "RemoveContainer" containerID="55e25c87ad488a0ee230737508f73167846e286147ac4f3da75ce9603ff85aad" Nov 28 16:45:49 crc kubenswrapper[4909]: I1128 16:45:49.910963 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:45:49 crc kubenswrapper[4909]: I1128 16:45:49.911537 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:46:19 crc kubenswrapper[4909]: I1128 16:46:19.911050 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:46:19 crc kubenswrapper[4909]: I1128 16:46:19.911609 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:46:49 crc kubenswrapper[4909]: I1128 16:46:49.911411 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:46:49 crc kubenswrapper[4909]: I1128 16:46:49.912121 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:46:49 crc kubenswrapper[4909]: I1128 16:46:49.916158 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:46:49 crc kubenswrapper[4909]: I1128 16:46:49.917040 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:46:49 crc kubenswrapper[4909]: I1128 16:46:49.917149 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148" gracePeriod=600 Nov 28 16:46:50 crc kubenswrapper[4909]: I1128 16:46:50.838341 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148" exitCode=0 Nov 28 16:46:50 crc kubenswrapper[4909]: I1128 16:46:50.838452 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148"} Nov 28 16:46:50 crc kubenswrapper[4909]: I1128 16:46:50.838664 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6"} Nov 28 16:46:50 crc kubenswrapper[4909]: I1128 16:46:50.838688 4909 scope.go:117] "RemoveContainer" containerID="0051ae960019817a1b9d0126f56eb43672e5b1694e62841c31a539d1caca21e9" Nov 28 16:49:19 crc kubenswrapper[4909]: I1128 16:49:19.911100 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:49:19 crc kubenswrapper[4909]: I1128 16:49:19.912615 4909 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:49:49 crc kubenswrapper[4909]: I1128 16:49:49.911258 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:49:49 crc kubenswrapper[4909]: I1128 16:49:49.911829 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:49:54 crc kubenswrapper[4909]: I1128 16:49:54.894970 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:49:54 crc kubenswrapper[4909]: E1128 16:49:54.901008 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54925a18-3574-4cf3-b36d-6f12d9c424fd" containerName="collect-profiles" Nov 28 16:49:54 crc kubenswrapper[4909]: I1128 16:49:54.901056 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="54925a18-3574-4cf3-b36d-6f12d9c424fd" containerName="collect-profiles" Nov 28 16:49:54 crc kubenswrapper[4909]: I1128 16:49:54.901883 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="54925a18-3574-4cf3-b36d-6f12d9c424fd" containerName="collect-profiles" Nov 28 16:49:54 crc kubenswrapper[4909]: I1128 16:49:54.910930 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:54 crc kubenswrapper[4909]: I1128 16:49:54.918201 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.008121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.008220 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g24sr\" (UniqueName: \"kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.008278 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.109388 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g24sr\" (UniqueName: \"kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.109476 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.109550 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.110188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.110403 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.144962 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-g24sr\" (UniqueName: \"kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr\") pod \"community-operators-v2w7h\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.237061 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:49:55 crc kubenswrapper[4909]: I1128 16:49:55.607538 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:49:56 crc kubenswrapper[4909]: I1128 16:49:56.419389 4909 generic.go:334] "Generic (PLEG): container finished" podID="2c50bbab-643f-4062-a649-47b2651cefe7" containerID="8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29" exitCode=0 Nov 28 16:49:56 crc kubenswrapper[4909]: I1128 16:49:56.419452 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerDied","Data":"8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29"} Nov 28 16:49:56 crc kubenswrapper[4909]: I1128 16:49:56.419491 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerStarted","Data":"6951b4b5beec31281497be4d13b40f6764dd638be419265375258ff72eb580fd"} Nov 28 16:49:56 crc kubenswrapper[4909]: I1128 16:49:56.421292 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:49:58 crc kubenswrapper[4909]: I1128 16:49:58.439291 4909 generic.go:334] "Generic (PLEG): container finished" podID="2c50bbab-643f-4062-a649-47b2651cefe7" containerID="8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee" exitCode=0 Nov 28 16:49:58 crc kubenswrapper[4909]: I1128 16:49:58.439364 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerDied","Data":"8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee"} Nov 28 16:49:59 crc kubenswrapper[4909]: I1128 16:49:59.449124 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerStarted","Data":"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5"} Nov 28 16:49:59 crc kubenswrapper[4909]: I1128 16:49:59.472253 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-v2w7h" podStartSLOduration=2.833159087 podStartE2EDuration="5.47223769s" podCreationTimestamp="2025-11-28 16:49:54 +0000 UTC" firstStartedPulling="2025-11-28 16:49:56.421087938 +0000 UTC m=+2378.817772462" lastFinishedPulling="2025-11-28 16:49:59.060166541 +0000 UTC m=+2381.456851065" observedRunningTime="2025-11-28 16:49:59.468126429 +0000 UTC m=+2381.864810953" watchObservedRunningTime="2025-11-28 16:49:59.47223769 +0000 UTC m=+2381.868922214" Nov 28 16:50:05 crc kubenswrapper[4909]: I1128 16:50:05.238147 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:05 crc kubenswrapper[4909]: I1128 16:50:05.238750 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:05 crc kubenswrapper[4909]: I1128 16:50:05.288116 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:05 crc kubenswrapper[4909]: I1128 16:50:05.536826 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:05 crc kubenswrapper[4909]: I1128 16:50:05.585984 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:50:07 crc kubenswrapper[4909]: I1128 16:50:07.503972 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v2w7h" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="registry-server" containerID="cri-o://f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5" gracePeriod=2 Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.370936 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.506121 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content\") pod \"2c50bbab-643f-4062-a649-47b2651cefe7\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.506459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g24sr\" (UniqueName: \"kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr\") pod \"2c50bbab-643f-4062-a649-47b2651cefe7\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.506561 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities\") pod \"2c50bbab-643f-4062-a649-47b2651cefe7\" (UID: \"2c50bbab-643f-4062-a649-47b2651cefe7\") " Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.507376 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities" (OuterVolumeSpecName: "utilities") pod "2c50bbab-643f-4062-a649-47b2651cefe7" (UID: "2c50bbab-643f-4062-a649-47b2651cefe7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.511912 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr" (OuterVolumeSpecName: "kube-api-access-g24sr") pod "2c50bbab-643f-4062-a649-47b2651cefe7" (UID: "2c50bbab-643f-4062-a649-47b2651cefe7"). InnerVolumeSpecName "kube-api-access-g24sr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.523024 4909 generic.go:334] "Generic (PLEG): container finished" podID="2c50bbab-643f-4062-a649-47b2651cefe7" containerID="f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5" exitCode=0 Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.523083 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerDied","Data":"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5"} Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.523134 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v2w7h" event={"ID":"2c50bbab-643f-4062-a649-47b2651cefe7","Type":"ContainerDied","Data":"6951b4b5beec31281497be4d13b40f6764dd638be419265375258ff72eb580fd"} Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.523158 4909 scope.go:117] "RemoveContainer" containerID="f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.523392 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v2w7h" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.544185 4909 scope.go:117] "RemoveContainer" containerID="8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.562997 4909 scope.go:117] "RemoveContainer" containerID="8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.573824 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c50bbab-643f-4062-a649-47b2651cefe7" (UID: "2c50bbab-643f-4062-a649-47b2651cefe7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.588205 4909 scope.go:117] "RemoveContainer" containerID="f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5" Nov 28 16:50:08 crc kubenswrapper[4909]: E1128 16:50:08.588695 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5\": container with ID starting with f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5 not found: ID does not exist" containerID="f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.588733 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5"} err="failed to get container status \"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5\": rpc error: code = NotFound desc = could not find container \"f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5\": container with ID starting with f42498e39e04d3166bb49d3bdd30587b52b13dbb3cefe23c7c4ffc2bd4e6a3f5 not found: ID does not exist" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.588762 4909 scope.go:117] "RemoveContainer" containerID="8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee" Nov 28 16:50:08 crc kubenswrapper[4909]: E1128 16:50:08.589074 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee\": container with ID starting with 8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee not found: ID does not exist" containerID="8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.589099 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee"} err="failed to get container status \"8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee\": rpc error: code = NotFound desc = could not find container \"8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee\": container with ID starting with 8a3519b9aec5fa9226ddf245dd488b49273347f41f2504e355087aea087b6cee not found: ID does not exist" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.589116 4909 scope.go:117] "RemoveContainer" containerID="8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29" Nov 28 16:50:08 crc kubenswrapper[4909]: E1128 16:50:08.589318 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29\": container with ID starting with 8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29 not found: ID does not exist" containerID="8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.589345 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29"} err="failed to get container status \"8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29\": rpc error: code = NotFound desc = could not 
find container \"8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29\": container with ID starting with 8b497ae107d0f9e6d40424a6a7c16f2478df396296da66bb2e26698024859b29 not found: ID does not exist" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.608788 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.608831 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g24sr\" (UniqueName: \"kubernetes.io/projected/2c50bbab-643f-4062-a649-47b2651cefe7-kube-api-access-g24sr\") on node \"crc\" DevicePath \"\"" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.608843 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c50bbab-643f-4062-a649-47b2651cefe7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.867814 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:50:08 crc kubenswrapper[4909]: I1128 16:50:08.875979 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v2w7h"] Nov 28 16:50:09 crc kubenswrapper[4909]: I1128 16:50:09.914241 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" path="/var/lib/kubelet/pods/2c50bbab-643f-4062-a649-47b2651cefe7/volumes" Nov 28 16:50:19 crc kubenswrapper[4909]: I1128 16:50:19.910812 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:50:19 crc kubenswrapper[4909]: I1128 16:50:19.911323 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:50:19 crc kubenswrapper[4909]: I1128 16:50:19.911370 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:50:19 crc kubenswrapper[4909]: I1128 16:50:19.911974 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:50:19 crc kubenswrapper[4909]: I1128 16:50:19.912026 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" gracePeriod=600 Nov 28 16:50:20 crc kubenswrapper[4909]: E1128 16:50:20.035200 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:50:20 crc kubenswrapper[4909]: I1128 16:50:20.647679 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" exitCode=0 Nov 28 16:50:20 crc kubenswrapper[4909]: I1128 16:50:20.647692 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6"} Nov 28 16:50:20 crc kubenswrapper[4909]: I1128 16:50:20.647773 4909 scope.go:117] "RemoveContainer" containerID="0e5c5e36930f47b4e9058bccdf356b7644aadbb7c92cf95c374bde0bff111148" Nov 28 16:50:20 crc kubenswrapper[4909]: I1128 16:50:20.648303 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:50:20 crc kubenswrapper[4909]: E1128 16:50:20.648562 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:50:31 crc kubenswrapper[4909]: I1128 16:50:31.901790 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:50:31 crc kubenswrapper[4909]: E1128 16:50:31.902371 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:50:45 crc kubenswrapper[4909]: I1128 16:50:45.902393 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:50:45 crc kubenswrapper[4909]: E1128 16:50:45.903363 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:50:57 crc kubenswrapper[4909]: I1128 16:50:57.901808 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:50:57 crc kubenswrapper[4909]: E1128 16:50:57.904921 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:51:11 crc kubenswrapper[4909]: I1128 16:51:11.901772 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:51:11 crc kubenswrapper[4909]: E1128 16:51:11.902673 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:51:24 crc kubenswrapper[4909]: I1128 16:51:24.902311 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:51:24 crc kubenswrapper[4909]: E1128 16:51:24.903407 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:51:37 crc kubenswrapper[4909]: I1128 16:51:37.905438 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:51:37 crc kubenswrapper[4909]: E1128 16:51:37.906218 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:51:52 crc kubenswrapper[4909]: I1128 16:51:52.901790 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:51:52 crc kubenswrapper[4909]: E1128 16:51:52.902636 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.689626 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:03 crc kubenswrapper[4909]: E1128 16:52:03.690763 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="extract-content" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.690784 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="extract-content" Nov 28 16:52:03 crc kubenswrapper[4909]: E1128 
16:52:03.690817 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="extract-utilities" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.690828 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="extract-utilities" Nov 28 16:52:03 crc kubenswrapper[4909]: E1128 16:52:03.690851 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="registry-server" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.690864 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="registry-server" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.691079 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c50bbab-643f-4062-a649-47b2651cefe7" containerName="registry-server" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.692590 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.706524 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.847666 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4kx\" (UniqueName: \"kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.847997 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.848114 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.954238 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4kx\" (UniqueName: \"kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.954323 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.954357 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.955930 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.956236 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:03 crc kubenswrapper[4909]: I1128 16:52:03.988779 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4kx\" (UniqueName: \"kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx\") pod \"redhat-marketplace-s86lj\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:04 crc kubenswrapper[4909]: I1128 16:52:04.023666 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:04 crc kubenswrapper[4909]: I1128 16:52:04.462583 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:04 crc kubenswrapper[4909]: I1128 16:52:04.508544 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerStarted","Data":"d2f5528b3096902dfc3af69f19f2a48e4ac8973fffe67a7498ea5eef74ec4343"} Nov 28 16:52:05 crc kubenswrapper[4909]: I1128 16:52:05.516906 4909 generic.go:334] "Generic (PLEG): container finished" podID="7f859c26-06d7-4276-9c54-f5004959ca18" containerID="935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7" exitCode=0 Nov 28 16:52:05 crc kubenswrapper[4909]: I1128 16:52:05.516991 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerDied","Data":"935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7"} Nov 28 16:52:05 crc kubenswrapper[4909]: I1128 16:52:05.902462 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:52:05 crc kubenswrapper[4909]: E1128 16:52:05.902686 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:52:06 crc kubenswrapper[4909]: I1128 16:52:06.528111 4909 generic.go:334] "Generic (PLEG): container finished" podID="7f859c26-06d7-4276-9c54-f5004959ca18" containerID="2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46" 
exitCode=0 Nov 28 16:52:06 crc kubenswrapper[4909]: I1128 16:52:06.528169 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerDied","Data":"2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46"} Nov 28 16:52:07 crc kubenswrapper[4909]: I1128 16:52:07.537707 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerStarted","Data":"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153"} Nov 28 16:52:07 crc kubenswrapper[4909]: I1128 16:52:07.562553 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s86lj" podStartSLOduration=2.952183437 podStartE2EDuration="4.562536077s" podCreationTimestamp="2025-11-28 16:52:03 +0000 UTC" firstStartedPulling="2025-11-28 16:52:05.518918895 +0000 UTC m=+2507.915603419" lastFinishedPulling="2025-11-28 16:52:07.129271525 +0000 UTC m=+2509.525956059" observedRunningTime="2025-11-28 16:52:07.555560548 +0000 UTC m=+2509.952245082" watchObservedRunningTime="2025-11-28 16:52:07.562536077 +0000 UTC m=+2509.959220601" Nov 28 16:52:14 crc kubenswrapper[4909]: I1128 16:52:14.024701 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:14 crc kubenswrapper[4909]: I1128 16:52:14.025438 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:14 crc kubenswrapper[4909]: I1128 16:52:14.077617 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:14 crc kubenswrapper[4909]: I1128 16:52:14.634703 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:14 crc kubenswrapper[4909]: I1128 16:52:14.683291 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:16 crc kubenswrapper[4909]: I1128 16:52:16.608504 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s86lj" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="registry-server" containerID="cri-o://6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153" gracePeriod=2 Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.534453 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.618587 4909 generic.go:334] "Generic (PLEG): container finished" podID="7f859c26-06d7-4276-9c54-f5004959ca18" containerID="6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153" exitCode=0 Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.618623 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerDied","Data":"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153"} Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.618683 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s86lj" event={"ID":"7f859c26-06d7-4276-9c54-f5004959ca18","Type":"ContainerDied","Data":"d2f5528b3096902dfc3af69f19f2a48e4ac8973fffe67a7498ea5eef74ec4343"} Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.618711 4909 scope.go:117] "RemoveContainer" containerID="6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.618707 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s86lj" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.643881 4909 scope.go:117] "RemoveContainer" containerID="2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.644860 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np4kx\" (UniqueName: \"kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx\") pod \"7f859c26-06d7-4276-9c54-f5004959ca18\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.644941 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content\") pod \"7f859c26-06d7-4276-9c54-f5004959ca18\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.645008 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities\") pod \"7f859c26-06d7-4276-9c54-f5004959ca18\" (UID: \"7f859c26-06d7-4276-9c54-f5004959ca18\") " Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.646210 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities" (OuterVolumeSpecName: "utilities") pod "7f859c26-06d7-4276-9c54-f5004959ca18" (UID: "7f859c26-06d7-4276-9c54-f5004959ca18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.653856 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx" (OuterVolumeSpecName: "kube-api-access-np4kx") pod "7f859c26-06d7-4276-9c54-f5004959ca18" (UID: "7f859c26-06d7-4276-9c54-f5004959ca18"). InnerVolumeSpecName "kube-api-access-np4kx". 
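Editor's note: the teardown sequence here interleaves volume TearDown calls with container removal, and the DeleteContainer / ContainerStatus NotFound errors just below are expected, not a fault: pod cleanup is idempotent, so a container that is already gone counts as removed. A minimal sketch of that pattern (illustrative only, not kubelet source):

```go
// Sketch: idempotent cleanup treats "already gone" as success, which is why
// the NotFound errors below are logged and then skipped rather than failing
// the pod teardown. errNotFound stands in for the CRI NotFound status code.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// removeContainer deletes a container by ID from a toy store; a second call
// for the same ID fails with errNotFound, like the runtime in the log.
func removeContainer(store map[string]bool, id string) error {
	if !store[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	delete(store, id)
	return nil
}

// cleanup is idempotent: a missing container is not treated as a failure.
func cleanup(store map[string]bool, ids []string) {
	for _, id := range ids {
		if err := removeContainer(store, id); err != nil {
			if errors.Is(err, errNotFound) {
				fmt.Printf("DeleteContainer returned error (ignored): %v\n", err)
				continue
			}
			fmt.Printf("cleanup failed: %v\n", err)
			return
		}
		fmt.Printf("removed %s\n", id)
	}
}

func main() {
	store := map[string]bool{"6f7e8224": true}
	cleanup(store, []string{"6f7e8224", "6f7e8224"}) // second pass hits NotFound
}
```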
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.658907 4909 scope.go:117] "RemoveContainer" containerID="935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.671594 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f859c26-06d7-4276-9c54-f5004959ca18" (UID: "7f859c26-06d7-4276-9c54-f5004959ca18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.725847 4909 scope.go:117] "RemoveContainer" containerID="6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153" Nov 28 16:52:17 crc kubenswrapper[4909]: E1128 16:52:17.726397 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153\": container with ID starting with 6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153 not found: ID does not exist" containerID="6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.726459 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153"} err="failed to get container status \"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153\": rpc error: code = NotFound desc = could not find container \"6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153\": container with ID starting with 6f7e8224421f07a78910358c58539ef240c53b70c40e096bf98c573d6746e153 not found: ID does not exist" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.726501 4909 scope.go:117] "RemoveContainer" containerID="2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46" Nov 28 16:52:17 crc kubenswrapper[4909]: E1128 16:52:17.727058 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46\": container with ID starting with 2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46 not found: ID does not exist" containerID="2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.727097 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46"} err="failed to get container status \"2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46\": rpc error: code = NotFound desc = could not find container \"2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46\": container with ID starting with 2eac3bc60c1cfd445116cc8e848610127555524ac27e44ad5d8cae6cc3ae6c46 not found: ID does not exist" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.727128 4909 scope.go:117] "RemoveContainer" containerID="935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7" Nov 28 16:52:17 crc kubenswrapper[4909]: E1128 16:52:17.727474 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7\": container with ID starting with 935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7 not found: ID does not exist" containerID="935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.727509 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7"} err="failed to get container status \"935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7\": rpc error: code = NotFound desc = could not find container \"935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7\": container with ID starting with 935f477bcbbb770d3c5d6b75f53e1a6cd7fb5b8d49d3448121e55fd8b522aef7 not found: ID does not exist" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.746440 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.746469 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f859c26-06d7-4276-9c54-f5004959ca18-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.746482 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np4kx\" (UniqueName: \"kubernetes.io/projected/7f859c26-06d7-4276-9c54-f5004959ca18-kube-api-access-np4kx\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.968120 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:17 crc kubenswrapper[4909]: I1128 16:52:17.983271 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s86lj"] Nov 28 16:52:19 crc kubenswrapper[4909]: I1128 16:52:19.911469 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" path="/var/lib/kubelet/pods/7f859c26-06d7-4276-9c54-f5004959ca18/volumes" Nov 28 16:52:20 crc kubenswrapper[4909]: I1128 16:52:20.901951 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:52:20 crc kubenswrapper[4909]: E1128 16:52:20.902325 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:52:34 crc kubenswrapper[4909]: I1128 16:52:34.902008 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:52:34 crc kubenswrapper[4909]: E1128 16:52:34.902855 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:52:49 crc kubenswrapper[4909]: I1128 16:52:49.901924 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:52:49 crc kubenswrapper[4909]: E1128 16:52:49.902722 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:53:04 crc kubenswrapper[4909]: I1128 16:53:04.901426 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:53:04 crc kubenswrapper[4909]: E1128 16:53:04.902267 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:53:18 crc kubenswrapper[4909]: I1128 16:53:18.902165 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:53:18 crc kubenswrapper[4909]: E1128 16:53:18.903202 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:53:33 crc kubenswrapper[4909]: I1128 16:53:33.902499 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:53:33 crc kubenswrapper[4909]: E1128 16:53:33.903697 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:53:44 crc kubenswrapper[4909]: I1128 16:53:44.901273 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:53:44 crc kubenswrapper[4909]: E1128 16:53:44.902015 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:53:56 crc kubenswrapper[4909]: I1128 16:53:56.901097 4909 
scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:53:56 crc kubenswrapper[4909]: E1128 16:53:56.901862 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:54:08 crc kubenswrapper[4909]: I1128 16:54:08.902174 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:54:08 crc kubenswrapper[4909]: E1128 16:54:08.903030 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.437623 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:15 crc kubenswrapper[4909]: E1128 16:54:15.438698 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="registry-server" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.438722 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="registry-server" Nov 28 16:54:15 crc kubenswrapper[4909]: E1128 16:54:15.438744 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="extract-content" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.438756 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="extract-content" Nov 28 16:54:15 crc kubenswrapper[4909]: E1128 16:54:15.438779 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="extract-utilities" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.438790 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="extract-utilities" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.439052 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f859c26-06d7-4276-9c54-f5004959ca18" containerName="registry-server" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.440720 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.446621 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.478625 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x5tl\" (UniqueName: \"kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.478756 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.478820 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.579417 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.579489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x5tl\" (UniqueName: \"kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.579532 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.579987 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.580014 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.598697 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2x5tl\" (UniqueName: \"kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl\") pod \"redhat-operators-qtlg9\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.759166 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:15 crc kubenswrapper[4909]: I1128 16:54:15.981368 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:16 crc kubenswrapper[4909]: I1128 16:54:16.084004 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerStarted","Data":"9197ebee76035fc3cdbf6034cad322ad3e55dc0cf64828a117ade3d585abcd00"} Nov 28 16:54:17 crc kubenswrapper[4909]: I1128 16:54:17.100025 4909 generic.go:334] "Generic (PLEG): container finished" podID="694d948f-b813-4ac7-a889-5df17df36753" containerID="1d2a5934dec11be154606d5a5efb40c364c874b45c83d31cde20c405d558943e" exitCode=0 Nov 28 16:54:17 crc kubenswrapper[4909]: I1128 16:54:17.100171 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerDied","Data":"1d2a5934dec11be154606d5a5efb40c364c874b45c83d31cde20c405d558943e"} Nov 28 16:54:18 crc kubenswrapper[4909]: I1128 16:54:18.113840 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerStarted","Data":"789b6b1bd62728e5f3e4306e50419d67f0206999970354fa0ca12d35e7a5674d"} Nov 28 16:54:19 crc kubenswrapper[4909]: I1128 16:54:19.126993 4909 generic.go:334] "Generic (PLEG): container finished" podID="694d948f-b813-4ac7-a889-5df17df36753" containerID="789b6b1bd62728e5f3e4306e50419d67f0206999970354fa0ca12d35e7a5674d" exitCode=0 Nov 28 16:54:19 crc kubenswrapper[4909]: I1128 16:54:19.127078 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerDied","Data":"789b6b1bd62728e5f3e4306e50419d67f0206999970354fa0ca12d35e7a5674d"} Nov 28 16:54:20 crc kubenswrapper[4909]: I1128 16:54:20.136743 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerStarted","Data":"023fd7d18d414133529d16e7caff9ecb1d06da2ff796fb3e4849c23fe20711f3"} Nov 28 16:54:20 crc kubenswrapper[4909]: I1128 16:54:20.159559 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qtlg9" podStartSLOduration=2.614792678 podStartE2EDuration="5.159523652s" podCreationTimestamp="2025-11-28 16:54:15 +0000 UTC" firstStartedPulling="2025-11-28 16:54:17.102143013 +0000 UTC m=+2639.498827567" lastFinishedPulling="2025-11-28 16:54:19.646873997 +0000 UTC m=+2642.043558541" observedRunningTime="2025-11-28 16:54:20.154790424 +0000 UTC m=+2642.551474958" watchObservedRunningTime="2025-11-28 16:54:20.159523652 +0000 UTC m=+2642.556208226" Nov 28 16:54:20 crc kubenswrapper[4909]: I1128 16:54:20.901525 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 
16:54:20 crc kubenswrapper[4909]: E1128 16:54:20.901881 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:54:25 crc kubenswrapper[4909]: I1128 16:54:25.759902 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:25 crc kubenswrapper[4909]: I1128 16:54:25.762047 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:26 crc kubenswrapper[4909]: I1128 16:54:26.875619 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qtlg9" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="registry-server" probeResult="failure" output=< Nov 28 16:54:26 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 16:54:26 crc kubenswrapper[4909]: > Nov 28 16:54:34 crc kubenswrapper[4909]: I1128 16:54:34.902121 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:54:34 crc kubenswrapper[4909]: E1128 16:54:34.902768 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:54:35 crc kubenswrapper[4909]: I1128 16:54:35.819580 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:35 crc kubenswrapper[4909]: I1128 16:54:35.881873 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:36 crc kubenswrapper[4909]: I1128 16:54:36.062594 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:37 crc kubenswrapper[4909]: I1128 16:54:37.295366 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qtlg9" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="registry-server" containerID="cri-o://023fd7d18d414133529d16e7caff9ecb1d06da2ff796fb3e4849c23fe20711f3" gracePeriod=2 Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.319285 4909 generic.go:334] "Generic (PLEG): container finished" podID="694d948f-b813-4ac7-a889-5df17df36753" containerID="023fd7d18d414133529d16e7caff9ecb1d06da2ff796fb3e4849c23fe20711f3" exitCode=0 Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.319374 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerDied","Data":"023fd7d18d414133529d16e7caff9ecb1d06da2ff796fb3e4849c23fe20711f3"} Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.578702 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.649045 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x5tl\" (UniqueName: \"kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl\") pod \"694d948f-b813-4ac7-a889-5df17df36753\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.649171 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities\") pod \"694d948f-b813-4ac7-a889-5df17df36753\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.649212 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content\") pod \"694d948f-b813-4ac7-a889-5df17df36753\" (UID: \"694d948f-b813-4ac7-a889-5df17df36753\") " Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.650155 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities" (OuterVolumeSpecName: "utilities") pod "694d948f-b813-4ac7-a889-5df17df36753" (UID: "694d948f-b813-4ac7-a889-5df17df36753"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.655005 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl" (OuterVolumeSpecName: "kube-api-access-2x5tl") pod "694d948f-b813-4ac7-a889-5df17df36753" (UID: "694d948f-b813-4ac7-a889-5df17df36753"). InnerVolumeSpecName "kube-api-access-2x5tl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.753742 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x5tl\" (UniqueName: \"kubernetes.io/projected/694d948f-b813-4ac7-a889-5df17df36753-kube-api-access-2x5tl\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.753829 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.760516 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "694d948f-b813-4ac7-a889-5df17df36753" (UID: "694d948f-b813-4ac7-a889-5df17df36753"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:39 crc kubenswrapper[4909]: I1128 16:54:39.855719 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/694d948f-b813-4ac7-a889-5df17df36753-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.328999 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qtlg9" event={"ID":"694d948f-b813-4ac7-a889-5df17df36753","Type":"ContainerDied","Data":"9197ebee76035fc3cdbf6034cad322ad3e55dc0cf64828a117ade3d585abcd00"} Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.329048 4909 scope.go:117] "RemoveContainer" containerID="023fd7d18d414133529d16e7caff9ecb1d06da2ff796fb3e4849c23fe20711f3" Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.329114 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qtlg9" Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.345879 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.351149 4909 scope.go:117] "RemoveContainer" containerID="789b6b1bd62728e5f3e4306e50419d67f0206999970354fa0ca12d35e7a5674d" Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.360142 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qtlg9"] Nov 28 16:54:40 crc kubenswrapper[4909]: I1128 16:54:40.370835 4909 scope.go:117] "RemoveContainer" containerID="1d2a5934dec11be154606d5a5efb40c364c874b45c83d31cde20c405d558943e" Nov 28 16:54:41 crc kubenswrapper[4909]: I1128 16:54:41.911530 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="694d948f-b813-4ac7-a889-5df17df36753" path="/var/lib/kubelet/pods/694d948f-b813-4ac7-a889-5df17df36753/volumes" Nov 28 16:54:46 crc kubenswrapper[4909]: I1128 16:54:46.901786 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:54:46 crc kubenswrapper[4909]: E1128 16:54:46.904101 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:55:00 crc kubenswrapper[4909]: I1128 16:55:00.902609 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:55:00 crc kubenswrapper[4909]: E1128 16:55:00.903971 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.538000 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:08 crc kubenswrapper[4909]: E1128 16:55:08.538795 4909 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="extract-content" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.538826 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="extract-content" Nov 28 16:55:08 crc kubenswrapper[4909]: E1128 16:55:08.538852 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="extract-utilities" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.538859 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="extract-utilities" Nov 28 16:55:08 crc kubenswrapper[4909]: E1128 16:55:08.538866 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="registry-server" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.538873 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="registry-server" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.539021 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="694d948f-b813-4ac7-a889-5df17df36753" containerName="registry-server" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.540427 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.551451 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.621176 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.621248 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8w47\" (UniqueName: \"kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.621273 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.723179 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.723488 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8w47\" (UniqueName: 
\"kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.723613 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.724144 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.724191 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.741579 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8w47\" (UniqueName: \"kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47\") pod \"certified-operators-dbf6m\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:08 crc kubenswrapper[4909]: I1128 16:55:08.894450 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:09 crc kubenswrapper[4909]: I1128 16:55:09.339440 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:09 crc kubenswrapper[4909]: I1128 16:55:09.595037 4909 generic.go:334] "Generic (PLEG): container finished" podID="e74057fd-de46-44ec-93d8-192feb7fb953" containerID="80fb4839d870af1d2ced2a5edc6b465af6f57af2b9483ea31fe1194833dcb63d" exitCode=0 Nov 28 16:55:09 crc kubenswrapper[4909]: I1128 16:55:09.595275 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerDied","Data":"80fb4839d870af1d2ced2a5edc6b465af6f57af2b9483ea31fe1194833dcb63d"} Nov 28 16:55:09 crc kubenswrapper[4909]: I1128 16:55:09.595579 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerStarted","Data":"2d8709afa4dc9704fd52f27978ee2e012f4cedfbb078ab615124d7f31e661c96"} Nov 28 16:55:09 crc kubenswrapper[4909]: I1128 16:55:09.597538 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:55:11 crc kubenswrapper[4909]: I1128 16:55:11.617294 4909 generic.go:334] "Generic (PLEG): container finished" podID="e74057fd-de46-44ec-93d8-192feb7fb953" containerID="0fa9aa0d8db4a81bcc48f5cda9f105c0681de91bb663c35da9ed432c3bba4a07" exitCode=0 Nov 28 16:55:11 crc kubenswrapper[4909]: I1128 16:55:11.617378 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerDied","Data":"0fa9aa0d8db4a81bcc48f5cda9f105c0681de91bb663c35da9ed432c3bba4a07"} Nov 28 16:55:12 crc kubenswrapper[4909]: I1128 16:55:12.624903 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerStarted","Data":"83f730132a1a37a6aafd5fd982d1b76777676aa1a025b43abae8c93ca05121d9"} Nov 28 16:55:12 crc kubenswrapper[4909]: I1128 16:55:12.646287 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dbf6m" podStartSLOduration=2.099983702 podStartE2EDuration="4.646269389s" podCreationTimestamp="2025-11-28 16:55:08 +0000 UTC" firstStartedPulling="2025-11-28 16:55:09.597244103 +0000 UTC m=+2691.993928627" lastFinishedPulling="2025-11-28 16:55:12.14352976 +0000 UTC m=+2694.540214314" observedRunningTime="2025-11-28 16:55:12.641364537 +0000 UTC m=+2695.038049071" watchObservedRunningTime="2025-11-28 16:55:12.646269389 +0000 UTC m=+2695.042953913" Nov 28 16:55:13 crc kubenswrapper[4909]: I1128 16:55:13.901534 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:55:13 crc kubenswrapper[4909]: E1128 16:55:13.901893 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 16:55:18 crc 
kubenswrapper[4909]: I1128 16:55:18.895268 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:18 crc kubenswrapper[4909]: I1128 16:55:18.895915 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:18 crc kubenswrapper[4909]: I1128 16:55:18.955030 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:19 crc kubenswrapper[4909]: I1128 16:55:19.726609 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:19 crc kubenswrapper[4909]: I1128 16:55:19.775104 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:21 crc kubenswrapper[4909]: I1128 16:55:21.704237 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dbf6m" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="registry-server" containerID="cri-o://83f730132a1a37a6aafd5fd982d1b76777676aa1a025b43abae8c93ca05121d9" gracePeriod=2 Nov 28 16:55:22 crc kubenswrapper[4909]: I1128 16:55:22.717048 4909 generic.go:334] "Generic (PLEG): container finished" podID="e74057fd-de46-44ec-93d8-192feb7fb953" containerID="83f730132a1a37a6aafd5fd982d1b76777676aa1a025b43abae8c93ca05121d9" exitCode=0 Nov 28 16:55:22 crc kubenswrapper[4909]: I1128 16:55:22.717098 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerDied","Data":"83f730132a1a37a6aafd5fd982d1b76777676aa1a025b43abae8c93ca05121d9"} Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.262617 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.363439 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content\") pod \"e74057fd-de46-44ec-93d8-192feb7fb953\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.363687 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities\") pod \"e74057fd-de46-44ec-93d8-192feb7fb953\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.363787 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8w47\" (UniqueName: \"kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47\") pod \"e74057fd-de46-44ec-93d8-192feb7fb953\" (UID: \"e74057fd-de46-44ec-93d8-192feb7fb953\") " Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.365478 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities" (OuterVolumeSpecName: "utilities") pod "e74057fd-de46-44ec-93d8-192feb7fb953" (UID: "e74057fd-de46-44ec-93d8-192feb7fb953"). InnerVolumeSpecName "utilities". 
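Editor's note: the startup and readiness transitions here, like the earlier failure output "timeout: failed to connect service \":50051\" within 1s", come from health checks against the registry-server's gRPC port. A stand-in probe, assuming only that a 1s connection deadline decides started vs. unhealthy (the real check is a gRPC health RPC, not a bare dial; any dial failure is reported here in the same shape as the log's output):

```go
// Illustrative stand-in for the registry-server startup probe seen in the log.
package main

import (
	"fmt"
	"net"
	"time"
)

// probe attempts a TCP connection within the timeout; failure is wrapped in
// the same message shape the log shows for the :50051 check.
func probe(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within %s", addr, timeout)
	}
	conn.Close()
	return nil
}

func main() {
	if err := probe(":50051", time.Second); err != nil {
		fmt.Println("startup probe:", err) // unhealthy until the server listens
	} else {
		fmt.Println("startup probe: started")
	}
}
```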
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.372074 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47" (OuterVolumeSpecName: "kube-api-access-k8w47") pod "e74057fd-de46-44ec-93d8-192feb7fb953" (UID: "e74057fd-de46-44ec-93d8-192feb7fb953"). InnerVolumeSpecName "kube-api-access-k8w47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.428163 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e74057fd-de46-44ec-93d8-192feb7fb953" (UID: "e74057fd-de46-44ec-93d8-192feb7fb953"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.465483 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.465541 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e74057fd-de46-44ec-93d8-192feb7fb953-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.465561 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8w47\" (UniqueName: \"kubernetes.io/projected/e74057fd-de46-44ec-93d8-192feb7fb953-kube-api-access-k8w47\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.739364 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dbf6m" event={"ID":"e74057fd-de46-44ec-93d8-192feb7fb953","Type":"ContainerDied","Data":"2d8709afa4dc9704fd52f27978ee2e012f4cedfbb078ab615124d7f31e661c96"} Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.739735 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dbf6m" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.739913 4909 scope.go:117] "RemoveContainer" containerID="83f730132a1a37a6aafd5fd982d1b76777676aa1a025b43abae8c93ca05121d9" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.779286 4909 scope.go:117] "RemoveContainer" containerID="0fa9aa0d8db4a81bcc48f5cda9f105c0681de91bb663c35da9ed432c3bba4a07" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.781823 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.787621 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dbf6m"] Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.800631 4909 scope.go:117] "RemoveContainer" containerID="80fb4839d870af1d2ced2a5edc6b465af6f57af2b9483ea31fe1194833dcb63d" Nov 28 16:55:23 crc kubenswrapper[4909]: I1128 16:55:23.917747 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" path="/var/lib/kubelet/pods/e74057fd-de46-44ec-93d8-192feb7fb953/volumes" Nov 28 16:55:24 crc kubenswrapper[4909]: I1128 16:55:24.901034 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 16:55:25 crc kubenswrapper[4909]: I1128 16:55:25.760140 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2"} Nov 28 16:57:49 crc kubenswrapper[4909]: I1128 16:57:49.911448 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:57:49 crc kubenswrapper[4909]: I1128 16:57:49.912036 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:58:19 crc kubenswrapper[4909]: I1128 16:58:19.911408 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:58:19 crc kubenswrapper[4909]: I1128 16:58:19.912342 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:58:49 crc kubenswrapper[4909]: I1128 16:58:49.911126 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" start-of-body= Nov 28 16:58:49 crc kubenswrapper[4909]: I1128 16:58:49.911894 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:58:49 crc kubenswrapper[4909]: I1128 16:58:49.916774 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 16:58:49 crc kubenswrapper[4909]: I1128 16:58:49.917551 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:58:49 crc kubenswrapper[4909]: I1128 16:58:49.917690 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2" gracePeriod=600 Nov 28 16:58:50 crc kubenswrapper[4909]: I1128 16:58:50.554972 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2" exitCode=0 Nov 28 16:58:50 crc kubenswrapper[4909]: I1128 16:58:50.555049 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2"} Nov 28 16:58:50 crc kubenswrapper[4909]: I1128 16:58:50.555349 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"} Nov 28 16:58:50 crc kubenswrapper[4909]: I1128 16:58:50.555375 4909 scope.go:117] "RemoveContainer" containerID="b7386394d46b2f3c02902742fe6df89e754e581b8f84011ca3dd5e4176b24bb6" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.153643 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8"] Nov 28 17:00:00 crc kubenswrapper[4909]: E1128 17:00:00.154578 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.154595 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4909]: E1128 17:00:00.154631 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.154639 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="registry-server" Nov 28 
17:00:00 crc kubenswrapper[4909]: E1128 17:00:00.154670 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.154679 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.154865 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e74057fd-de46-44ec-93d8-192feb7fb953" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.155396 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.157988 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.159146 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.187564 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8"] Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.255549 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.255630 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz9mr\" (UniqueName: \"kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.255714 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.356557 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.356677 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz9mr\" (UniqueName: \"kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.356751 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.358011 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.373637 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz9mr\" (UniqueName: \"kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.378544 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume\") pod \"collect-profiles-29405820-6xxj8\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.482974 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:00 crc kubenswrapper[4909]: I1128 17:00:00.924548 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8"] Nov 28 17:00:01 crc kubenswrapper[4909]: I1128 17:00:01.247435 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" event={"ID":"2e890f14-3666-43fd-9809-568c1ea5d014","Type":"ContainerStarted","Data":"0e7ce8997695d0e3fa03e38fcaa1e8558e1d78da8ccd96fdbce3fee63f52c05b"} Nov 28 17:00:01 crc kubenswrapper[4909]: I1128 17:00:01.247521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" event={"ID":"2e890f14-3666-43fd-9809-568c1ea5d014","Type":"ContainerStarted","Data":"4879ab97a342e8d5c4a1e6e127209916e86c04cadd8a8d04d7df801481cb0c71"} Nov 28 17:00:01 crc kubenswrapper[4909]: I1128 17:00:01.270282 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" podStartSLOduration=1.270255323 podStartE2EDuration="1.270255323s" podCreationTimestamp="2025-11-28 17:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:00:01.263697086 +0000 UTC m=+2983.660381620" watchObservedRunningTime="2025-11-28 17:00:01.270255323 +0000 UTC m=+2983.666939847" Nov 28 17:00:02 crc kubenswrapper[4909]: I1128 17:00:02.264314 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e890f14-3666-43fd-9809-568c1ea5d014" containerID="0e7ce8997695d0e3fa03e38fcaa1e8558e1d78da8ccd96fdbce3fee63f52c05b" exitCode=0 Nov 28 17:00:02 crc kubenswrapper[4909]: I1128 17:00:02.264403 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" event={"ID":"2e890f14-3666-43fd-9809-568c1ea5d014","Type":"ContainerDied","Data":"0e7ce8997695d0e3fa03e38fcaa1e8558e1d78da8ccd96fdbce3fee63f52c05b"} Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.573385 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.705695 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume\") pod \"2e890f14-3666-43fd-9809-568c1ea5d014\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.706243 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume\") pod \"2e890f14-3666-43fd-9809-568c1ea5d014\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.706265 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz9mr\" (UniqueName: \"kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr\") pod \"2e890f14-3666-43fd-9809-568c1ea5d014\" (UID: \"2e890f14-3666-43fd-9809-568c1ea5d014\") " Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.706115 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume" (OuterVolumeSpecName: "config-volume") pod "2e890f14-3666-43fd-9809-568c1ea5d014" (UID: "2e890f14-3666-43fd-9809-568c1ea5d014"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.706608 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e890f14-3666-43fd-9809-568c1ea5d014-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.714839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2e890f14-3666-43fd-9809-568c1ea5d014" (UID: "2e890f14-3666-43fd-9809-568c1ea5d014"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.714891 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr" (OuterVolumeSpecName: "kube-api-access-mz9mr") pod "2e890f14-3666-43fd-9809-568c1ea5d014" (UID: "2e890f14-3666-43fd-9809-568c1ea5d014"). InnerVolumeSpecName "kube-api-access-mz9mr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.808383 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e890f14-3666-43fd-9809-568c1ea5d014-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4909]: I1128 17:00:03.808810 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz9mr\" (UniqueName: \"kubernetes.io/projected/2e890f14-3666-43fd-9809-568c1ea5d014-kube-api-access-mz9mr\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:04 crc kubenswrapper[4909]: I1128 17:00:04.287262 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" event={"ID":"2e890f14-3666-43fd-9809-568c1ea5d014","Type":"ContainerDied","Data":"4879ab97a342e8d5c4a1e6e127209916e86c04cadd8a8d04d7df801481cb0c71"} Nov 28 17:00:04 crc kubenswrapper[4909]: I1128 17:00:04.287304 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4879ab97a342e8d5c4a1e6e127209916e86c04cadd8a8d04d7df801481cb0c71" Nov 28 17:00:04 crc kubenswrapper[4909]: I1128 17:00:04.287355 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8" Nov 28 17:00:04 crc kubenswrapper[4909]: I1128 17:00:04.345297 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"] Nov 28 17:00:04 crc kubenswrapper[4909]: I1128 17:00:04.352565 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-nv5g7"] Nov 28 17:00:05 crc kubenswrapper[4909]: I1128 17:00:05.910237 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="897228c9-f7a7-4b65-9dcc-207146ee92f6" path="/var/lib/kubelet/pods/897228c9-f7a7-4b65-9dcc-207146ee92f6/volumes" Nov 28 17:00:29 crc kubenswrapper[4909]: I1128 17:00:29.678971 4909 scope.go:117] "RemoveContainer" containerID="5ba11df5c984ab61981376feae17e538d7f828e2e0c91df70a4373a6bd428ef6" Nov 28 17:01:19 crc kubenswrapper[4909]: I1128 17:01:19.911088 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:01:19 crc kubenswrapper[4909]: I1128 17:01:19.911934 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:01:49 crc kubenswrapper[4909]: I1128 17:01:49.911884 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:01:49 crc kubenswrapper[4909]: I1128 17:01:49.912884 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:02:19 crc kubenswrapper[4909]: I1128 17:02:19.911670 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:02:19 crc kubenswrapper[4909]: I1128 17:02:19.912260 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:02:19 crc kubenswrapper[4909]: I1128 17:02:19.913591 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:02:19 crc kubenswrapper[4909]: I1128 17:02:19.914302 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:02:19 crc kubenswrapper[4909]: I1128 17:02:19.914389 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" gracePeriod=600 Nov 28 17:02:20 crc kubenswrapper[4909]: E1128 17:02:20.040803 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:02:20 crc kubenswrapper[4909]: I1128 17:02:20.568375 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" exitCode=0 Nov 28 17:02:20 crc kubenswrapper[4909]: I1128 17:02:20.568443 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"} Nov 28 17:02:20 crc kubenswrapper[4909]: I1128 17:02:20.568492 4909 scope.go:117] "RemoveContainer" containerID="f1e70730a4fa23492c83658f97f1423373cb0d7eefe5b1d7998b6248a2f74ea2" Nov 28 17:02:20 crc kubenswrapper[4909]: I1128 17:02:20.569194 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:02:20 crc kubenswrapper[4909]: E1128 17:02:20.569582 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:02:35 crc kubenswrapper[4909]: I1128 17:02:35.902016 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:02:35 crc kubenswrapper[4909]: E1128 17:02:35.903089 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:02:50 crc kubenswrapper[4909]: I1128 17:02:50.902506 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:02:50 crc kubenswrapper[4909]: E1128 17:02:50.903687 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:03:05 crc kubenswrapper[4909]: I1128 17:03:05.902919 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:03:05 crc kubenswrapper[4909]: E1128 17:03:05.903831 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.481762 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zvqgx"] Nov 28 17:03:17 crc kubenswrapper[4909]: E1128 17:03:17.482784 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e890f14-3666-43fd-9809-568c1ea5d014" containerName="collect-profiles" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.482819 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e890f14-3666-43fd-9809-568c1ea5d014" containerName="collect-profiles" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.483197 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e890f14-3666-43fd-9809-568c1ea5d014" containerName="collect-profiles" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.485556 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.490807 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvqgx"] Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.568584 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-utilities\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.568675 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt7mm\" (UniqueName: \"kubernetes.io/projected/2d23c6e1-8623-482d-aef1-4367ab709c41-kube-api-access-dt7mm\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.568714 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-catalog-content\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.670008 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt7mm\" (UniqueName: \"kubernetes.io/projected/2d23c6e1-8623-482d-aef1-4367ab709c41-kube-api-access-dt7mm\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.670082 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-catalog-content\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.670133 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-utilities\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.670552 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-utilities\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.671150 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d23c6e1-8623-482d-aef1-4367ab709c41-catalog-content\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.698223 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-dt7mm\" (UniqueName: \"kubernetes.io/projected/2d23c6e1-8623-482d-aef1-4367ab709c41-kube-api-access-dt7mm\") pod \"redhat-marketplace-zvqgx\" (UID: \"2d23c6e1-8623-482d-aef1-4367ab709c41\") " pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.815578 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:17 crc kubenswrapper[4909]: I1128 17:03:17.906300 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:03:17 crc kubenswrapper[4909]: E1128 17:03:17.906960 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:03:18 crc kubenswrapper[4909]: I1128 17:03:18.325812 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvqgx"] Nov 28 17:03:19 crc kubenswrapper[4909]: I1128 17:03:19.173234 4909 generic.go:334] "Generic (PLEG): container finished" podID="2d23c6e1-8623-482d-aef1-4367ab709c41" containerID="f41b97efe364711030b6522262c6606f159d95fd15109ea50f0f051d2b30d9f8" exitCode=0 Nov 28 17:03:19 crc kubenswrapper[4909]: I1128 17:03:19.173274 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvqgx" event={"ID":"2d23c6e1-8623-482d-aef1-4367ab709c41","Type":"ContainerDied","Data":"f41b97efe364711030b6522262c6606f159d95fd15109ea50f0f051d2b30d9f8"} Nov 28 17:03:19 crc kubenswrapper[4909]: I1128 17:03:19.173297 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvqgx" event={"ID":"2d23c6e1-8623-482d-aef1-4367ab709c41","Type":"ContainerStarted","Data":"253717dec44cafe182bb7d91b3c6df3bd798d1a5cfda61d9e5bcb2011d0c11a4"} Nov 28 17:03:19 crc kubenswrapper[4909]: I1128 17:03:19.175993 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:03:23 crc kubenswrapper[4909]: I1128 17:03:23.207055 4909 generic.go:334] "Generic (PLEG): container finished" podID="2d23c6e1-8623-482d-aef1-4367ab709c41" containerID="ed0b42a670b3a1f1e6bc87bf900d8607701408f046748dd75106723cc644cc22" exitCode=0 Nov 28 17:03:23 crc kubenswrapper[4909]: I1128 17:03:23.207092 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvqgx" event={"ID":"2d23c6e1-8623-482d-aef1-4367ab709c41","Type":"ContainerDied","Data":"ed0b42a670b3a1f1e6bc87bf900d8607701408f046748dd75106723cc644cc22"} Nov 28 17:03:25 crc kubenswrapper[4909]: I1128 17:03:25.244919 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvqgx" event={"ID":"2d23c6e1-8623-482d-aef1-4367ab709c41","Type":"ContainerStarted","Data":"c5cc942d66be5f7969af316bf6a821b50da963e965542482f5c8a90666a54d5c"} Nov 28 17:03:25 crc kubenswrapper[4909]: I1128 17:03:25.268385 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zvqgx" podStartSLOduration=3.168606862 podStartE2EDuration="8.268357537s" 
podCreationTimestamp="2025-11-28 17:03:17 +0000 UTC" firstStartedPulling="2025-11-28 17:03:19.175764635 +0000 UTC m=+3181.572449159" lastFinishedPulling="2025-11-28 17:03:24.2755153 +0000 UTC m=+3186.672199834" observedRunningTime="2025-11-28 17:03:25.257830023 +0000 UTC m=+3187.654514567" watchObservedRunningTime="2025-11-28 17:03:25.268357537 +0000 UTC m=+3187.665042071" Nov 28 17:03:27 crc kubenswrapper[4909]: I1128 17:03:27.816048 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:27 crc kubenswrapper[4909]: I1128 17:03:27.817336 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:27 crc kubenswrapper[4909]: I1128 17:03:27.893636 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:29 crc kubenswrapper[4909]: I1128 17:03:29.360331 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zvqgx" Nov 28 17:03:29 crc kubenswrapper[4909]: I1128 17:03:29.463189 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvqgx"] Nov 28 17:03:29 crc kubenswrapper[4909]: I1128 17:03:29.533737 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"] Nov 28 17:03:29 crc kubenswrapper[4909]: I1128 17:03:29.534409 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-skkwd" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="registry-server" containerID="cri-o://a615688e3a4a2c8aadbefada068a9a1a8ebdd716706df0696d134b67b3798331" gracePeriod=2 Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.335861 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerID="a615688e3a4a2c8aadbefada068a9a1a8ebdd716706df0696d134b67b3798331" exitCode=0 Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.335928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerDied","Data":"a615688e3a4a2c8aadbefada068a9a1a8ebdd716706df0696d134b67b3798331"} Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.542100 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-skkwd" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.646612 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p6xw\" (UniqueName: \"kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw\") pod \"ce487258-c9fc-4b91-9878-87ca92c76d15\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.646967 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content\") pod \"ce487258-c9fc-4b91-9878-87ca92c76d15\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.646997 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities\") pod \"ce487258-c9fc-4b91-9878-87ca92c76d15\" (UID: \"ce487258-c9fc-4b91-9878-87ca92c76d15\") " Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.647857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities" (OuterVolumeSpecName: "utilities") pod "ce487258-c9fc-4b91-9878-87ca92c76d15" (UID: "ce487258-c9fc-4b91-9878-87ca92c76d15"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.651772 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw" (OuterVolumeSpecName: "kube-api-access-8p6xw") pod "ce487258-c9fc-4b91-9878-87ca92c76d15" (UID: "ce487258-c9fc-4b91-9878-87ca92c76d15"). InnerVolumeSpecName "kube-api-access-8p6xw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.660960 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce487258-c9fc-4b91-9878-87ca92c76d15" (UID: "ce487258-c9fc-4b91-9878-87ca92c76d15"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.748616 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.748840 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce487258-c9fc-4b91-9878-87ca92c76d15-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:30 crc kubenswrapper[4909]: I1128 17:03:30.748935 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p6xw\" (UniqueName: \"kubernetes.io/projected/ce487258-c9fc-4b91-9878-87ca92c76d15-kube-api-access-8p6xw\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.346898 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-skkwd" Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.346904 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-skkwd" event={"ID":"ce487258-c9fc-4b91-9878-87ca92c76d15","Type":"ContainerDied","Data":"6422f3449fbf217b4a14d6322bf25a001b82306571b6337834ad59140f4bd350"} Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.347017 4909 scope.go:117] "RemoveContainer" containerID="a615688e3a4a2c8aadbefada068a9a1a8ebdd716706df0696d134b67b3798331" Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.368232 4909 scope.go:117] "RemoveContainer" containerID="2c87ff67c0dec40fb6678a41103c70dcf3570cbbf2ed150d51b34ad7fc1ad754" Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.391966 4909 scope.go:117] "RemoveContainer" containerID="01904f59f3ed146d43d0cd2a0cd3a48487b01553abf0d95a40d4a371708a39d4" Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.401767 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"] Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.408078 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-skkwd"] Nov 28 17:03:31 crc kubenswrapper[4909]: I1128 17:03:31.909517 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" path="/var/lib/kubelet/pods/ce487258-c9fc-4b91-9878-87ca92c76d15/volumes" Nov 28 17:03:32 crc kubenswrapper[4909]: I1128 17:03:32.902067 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:03:32 crc kubenswrapper[4909]: E1128 17:03:32.902488 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:03:44 crc kubenswrapper[4909]: I1128 17:03:44.901648 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:03:44 crc kubenswrapper[4909]: E1128 17:03:44.902480 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:03:58 crc kubenswrapper[4909]: I1128 17:03:58.902341 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:03:58 crc kubenswrapper[4909]: E1128 17:03:58.903457 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 
17:04:09 crc kubenswrapper[4909]: I1128 17:04:09.900893 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:04:09 crc kubenswrapper[4909]: E1128 17:04:09.901594 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.796359 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fpmqh"] Nov 28 17:04:13 crc kubenswrapper[4909]: E1128 17:04:13.797444 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="registry-server" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.797468 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="registry-server" Nov 28 17:04:13 crc kubenswrapper[4909]: E1128 17:04:13.797497 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="extract-content" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.797506 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="extract-content" Nov 28 17:04:13 crc kubenswrapper[4909]: E1128 17:04:13.797537 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="extract-utilities" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.797546 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="extract-utilities" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.797782 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce487258-c9fc-4b91-9878-87ca92c76d15" containerName="registry-server" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.799372 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.818485 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fpmqh"] Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.877758 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42792\" (UniqueName: \"kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.877848 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.877879 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.979629 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.979757 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.979836 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42792\" (UniqueName: \"kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.980186 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:13 crc kubenswrapper[4909]: I1128 17:04:13.981053 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.002544 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-42792\" (UniqueName: \"kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792\") pod \"community-operators-fpmqh\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") " pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.126991 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.468880 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fpmqh"] Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.723975 4909 generic.go:334] "Generic (PLEG): container finished" podID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerID="867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e" exitCode=0 Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.724142 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerDied","Data":"867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e"} Nov 28 17:04:14 crc kubenswrapper[4909]: I1128 17:04:14.724230 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerStarted","Data":"8a3088a694fdbcf4e5b7528320f926f7fe1cc516249cac0067ca836af94d67be"} Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.782226 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-226tg"] Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.784370 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.799283 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-226tg"] Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.917793 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.918162 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:15 crc kubenswrapper[4909]: I1128 17:04:15.918221 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24zk9\" (UniqueName: \"kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.020003 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.020070 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.020092 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24zk9\" (UniqueName: \"kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.021092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.021373 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.050915 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-24zk9\" (UniqueName: \"kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9\") pod \"redhat-operators-226tg\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") " pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.105294 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-226tg" Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.564532 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-226tg"] Nov 28 17:04:16 crc kubenswrapper[4909]: W1128 17:04:16.567810 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod955e1ad2_93a4_4648_891e_afbcbba2bac8.slice/crio-269ed9366ad14efb8cc6353076454abb041ae4f6f9b8351acca67ff8f986a38a WatchSource:0}: Error finding container 269ed9366ad14efb8cc6353076454abb041ae4f6f9b8351acca67ff8f986a38a: Status 404 returned error can't find the container with id 269ed9366ad14efb8cc6353076454abb041ae4f6f9b8351acca67ff8f986a38a Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.735932 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerStarted","Data":"269ed9366ad14efb8cc6353076454abb041ae4f6f9b8351acca67ff8f986a38a"} Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.738064 4909 generic.go:334] "Generic (PLEG): container finished" podID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerID="908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941" exitCode=0 Nov 28 17:04:16 crc kubenswrapper[4909]: I1128 17:04:16.738103 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerDied","Data":"908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941"} Nov 28 17:04:17 crc kubenswrapper[4909]: I1128 17:04:17.747036 4909 generic.go:334] "Generic (PLEG): container finished" podID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerID="3d597fdc6ef39203e9753ccc76bd112989cdd01e5c144ef6bc09bbab8e420bfc" exitCode=0 Nov 28 17:04:17 crc kubenswrapper[4909]: I1128 17:04:17.747362 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerDied","Data":"3d597fdc6ef39203e9753ccc76bd112989cdd01e5c144ef6bc09bbab8e420bfc"} Nov 28 17:04:18 crc kubenswrapper[4909]: I1128 17:04:18.771820 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerStarted","Data":"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"} Nov 28 17:04:18 crc kubenswrapper[4909]: I1128 17:04:18.795930 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fpmqh" podStartSLOduration=2.668240656 podStartE2EDuration="5.795911134s" podCreationTimestamp="2025-11-28 17:04:13 +0000 UTC" firstStartedPulling="2025-11-28 17:04:14.725313866 +0000 UTC m=+3237.121998390" lastFinishedPulling="2025-11-28 17:04:17.852984334 +0000 UTC m=+3240.249668868" observedRunningTime="2025-11-28 17:04:18.791370671 +0000 UTC m=+3241.188055205" watchObservedRunningTime="2025-11-28 
17:04:18.795911134 +0000 UTC m=+3241.192595658" Nov 28 17:04:22 crc kubenswrapper[4909]: I1128 17:04:22.901297 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd" Nov 28 17:04:22 crc kubenswrapper[4909]: E1128 17:04:22.901918 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.127794 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.128037 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.173073 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.819577 4909 generic.go:334] "Generic (PLEG): container finished" podID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerID="a17e40fc58dedb8ac0fc0fa19ad778bf76dd886b0ce5a0fc882813c8056dbef2" exitCode=0 Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.819684 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerDied","Data":"a17e40fc58dedb8ac0fc0fa19ad778bf76dd886b0ce5a0fc882813c8056dbef2"} Nov 28 17:04:24 crc kubenswrapper[4909]: I1128 17:04:24.865742 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fpmqh" Nov 28 17:04:26 crc kubenswrapper[4909]: I1128 17:04:26.777473 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fpmqh"] Nov 28 17:04:26 crc kubenswrapper[4909]: I1128 17:04:26.837319 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerStarted","Data":"dd4976f7360094973752052c03880599747562348d89900cc2987923c1b8d2c3"} Nov 28 17:04:26 crc kubenswrapper[4909]: I1128 17:04:26.837522 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fpmqh" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="registry-server" containerID="cri-o://3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243" gracePeriod=2 Nov 28 17:04:26 crc kubenswrapper[4909]: I1128 17:04:26.864439 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-226tg" podStartSLOduration=5.217949847 podStartE2EDuration="11.864418728s" podCreationTimestamp="2025-11-28 17:04:15 +0000 UTC" firstStartedPulling="2025-11-28 17:04:18.775048371 +0000 UTC m=+3241.171732895" lastFinishedPulling="2025-11-28 17:04:25.421517242 +0000 UTC m=+3247.818201776" observedRunningTime="2025-11-28 17:04:26.863940475 +0000 UTC m=+3249.260625009" watchObservedRunningTime="2025-11-28 17:04:26.864418728 +0000 UTC m=+3249.261103262" 
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.703941 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fpmqh"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.808858 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content\") pod \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") "
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.809142 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities\") pod \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") "
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.809245 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42792\" (UniqueName: \"kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792\") pod \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\" (UID: \"612bcedf-28c4-4a4c-81c5-13f5e4478a8b\") "
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.811233 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities" (OuterVolumeSpecName: "utilities") pod "612bcedf-28c4-4a4c-81c5-13f5e4478a8b" (UID: "612bcedf-28c4-4a4c-81c5-13f5e4478a8b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.814525 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792" (OuterVolumeSpecName: "kube-api-access-42792") pod "612bcedf-28c4-4a4c-81c5-13f5e4478a8b" (UID: "612bcedf-28c4-4a4c-81c5-13f5e4478a8b"). InnerVolumeSpecName "kube-api-access-42792". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.851264 4909 generic.go:334] "Generic (PLEG): container finished" podID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerID="3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243" exitCode=0
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.851488 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerDied","Data":"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"}
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.851564 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fpmqh" event={"ID":"612bcedf-28c4-4a4c-81c5-13f5e4478a8b","Type":"ContainerDied","Data":"8a3088a694fdbcf4e5b7528320f926f7fe1cc516249cac0067ca836af94d67be"}
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.851682 4909 scope.go:117] "RemoveContainer" containerID="3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.851876 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fpmqh"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.880282 4909 scope.go:117] "RemoveContainer" containerID="908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.907891 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "612bcedf-28c4-4a4c-81c5-13f5e4478a8b" (UID: "612bcedf-28c4-4a4c-81c5-13f5e4478a8b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.917925 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.917954 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.917967 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42792\" (UniqueName: \"kubernetes.io/projected/612bcedf-28c4-4a4c-81c5-13f5e4478a8b-kube-api-access-42792\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.942728 4909 scope.go:117] "RemoveContainer" containerID="867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.963956 4909 scope.go:117] "RemoveContainer" containerID="3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"
Nov 28 17:04:28 crc kubenswrapper[4909]: E1128 17:04:28.964425 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243\": container with ID starting with 3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243 not found: ID does not exist" containerID="3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.964468 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243"} err="failed to get container status \"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243\": rpc error: code = NotFound desc = could not find container \"3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243\": container with ID starting with 3ffe953852d305d4c816c603e1599efab9e811826b05fd2e451d2be2695d3243 not found: ID does not exist"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.964500 4909 scope.go:117] "RemoveContainer" containerID="908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941"
Nov 28 17:04:28 crc kubenswrapper[4909]: E1128 17:04:28.964891 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941\": container with ID starting with 908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941 not found: ID does not exist" containerID="908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.964923 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941"} err="failed to get container status \"908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941\": rpc error: code = NotFound desc = could not find container \"908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941\": container with ID starting with 908a183113279ca2130479b4826cef5ff69c0b399e5f8c8ae3a753be68ac7941 not found: ID does not exist"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.964946 4909 scope.go:117] "RemoveContainer" containerID="867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e"
Nov 28 17:04:28 crc kubenswrapper[4909]: E1128 17:04:28.965196 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e\": container with ID starting with 867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e not found: ID does not exist" containerID="867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e"
Nov 28 17:04:28 crc kubenswrapper[4909]: I1128 17:04:28.965381 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e"} err="failed to get container status \"867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e\": rpc error: code = NotFound desc = could not find container \"867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e\": container with ID starting with 867af9062ad6e4c05191e2d7823fd4dd6dd3317756da4985f7b84a4b5d966f4e not found: ID does not exist"
Nov 28 17:04:29 crc kubenswrapper[4909]: I1128 17:04:29.185042 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fpmqh"]
Nov 28 17:04:29 crc kubenswrapper[4909]: I1128 17:04:29.190427 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fpmqh"]
Nov 28 17:04:29 crc kubenswrapper[4909]: I1128 17:04:29.911636 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" path="/var/lib/kubelet/pods/612bcedf-28c4-4a4c-81c5-13f5e4478a8b/volumes"
Nov 28 17:04:33 crc kubenswrapper[4909]: I1128 17:04:33.902413 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:04:33 crc kubenswrapper[4909]: E1128 17:04:33.903027 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:04:36 crc kubenswrapper[4909]: I1128 17:04:36.105899 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:36 crc kubenswrapper[4909]: I1128 17:04:36.106382 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:36 crc kubenswrapper[4909]: I1128 17:04:36.169134 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:37 crc kubenswrapper[4909]: I1128 17:04:37.005472 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:37 crc kubenswrapper[4909]: I1128 17:04:37.097455 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-226tg"]
Nov 28 17:04:38 crc kubenswrapper[4909]: I1128 17:04:38.946748 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-226tg" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="registry-server" containerID="cri-o://dd4976f7360094973752052c03880599747562348d89900cc2987923c1b8d2c3" gracePeriod=2
Nov 28 17:04:40 crc kubenswrapper[4909]: I1128 17:04:40.963202 4909 generic.go:334] "Generic (PLEG): container finished" podID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerID="dd4976f7360094973752052c03880599747562348d89900cc2987923c1b8d2c3" exitCode=0
Nov 28 17:04:40 crc kubenswrapper[4909]: I1128 17:04:40.963279 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerDied","Data":"dd4976f7360094973752052c03880599747562348d89900cc2987923c1b8d2c3"}
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.953190 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.968475 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24zk9\" (UniqueName: \"kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9\") pod \"955e1ad2-93a4-4648-891e-afbcbba2bac8\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") "
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.968751 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content\") pod \"955e1ad2-93a4-4648-891e-afbcbba2bac8\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") "
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.968856 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities\") pod \"955e1ad2-93a4-4648-891e-afbcbba2bac8\" (UID: \"955e1ad2-93a4-4648-891e-afbcbba2bac8\") "
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.973866 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities" (OuterVolumeSpecName: "utilities") pod "955e1ad2-93a4-4648-891e-afbcbba2bac8" (UID: "955e1ad2-93a4-4648-891e-afbcbba2bac8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.975701 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:44 crc kubenswrapper[4909]: I1128 17:04:44.982209 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9" (OuterVolumeSpecName: "kube-api-access-24zk9") pod "955e1ad2-93a4-4648-891e-afbcbba2bac8" (UID: "955e1ad2-93a4-4648-891e-afbcbba2bac8"). InnerVolumeSpecName "kube-api-access-24zk9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.006211 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-226tg" event={"ID":"955e1ad2-93a4-4648-891e-afbcbba2bac8","Type":"ContainerDied","Data":"269ed9366ad14efb8cc6353076454abb041ae4f6f9b8351acca67ff8f986a38a"}
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.006585 4909 scope.go:117] "RemoveContainer" containerID="dd4976f7360094973752052c03880599747562348d89900cc2987923c1b8d2c3"
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.006406 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-226tg"
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.023401 4909 scope.go:117] "RemoveContainer" containerID="a17e40fc58dedb8ac0fc0fa19ad778bf76dd886b0ce5a0fc882813c8056dbef2"
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.039387 4909 scope.go:117] "RemoveContainer" containerID="3d597fdc6ef39203e9753ccc76bd112989cdd01e5c144ef6bc09bbab8e420bfc"
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.076952 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24zk9\" (UniqueName: \"kubernetes.io/projected/955e1ad2-93a4-4648-891e-afbcbba2bac8-kube-api-access-24zk9\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.089028 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "955e1ad2-93a4-4648-891e-afbcbba2bac8" (UID: "955e1ad2-93a4-4648-891e-afbcbba2bac8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.177934 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955e1ad2-93a4-4648-891e-afbcbba2bac8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.343256 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-226tg"]
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.349068 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-226tg"]
Nov 28 17:04:45 crc kubenswrapper[4909]: I1128 17:04:45.914910 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" path="/var/lib/kubelet/pods/955e1ad2-93a4-4648-891e-afbcbba2bac8/volumes"
Nov 28 17:04:48 crc kubenswrapper[4909]: I1128 17:04:48.902413 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:04:48 crc kubenswrapper[4909]: E1128 17:04:48.903036 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:04:59 crc kubenswrapper[4909]: I1128 17:04:59.902093 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:04:59 crc kubenswrapper[4909]: E1128 17:04:59.903098 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:05:10 crc kubenswrapper[4909]: I1128 17:05:10.901811 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:05:10 crc kubenswrapper[4909]: E1128 17:05:10.903092 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:05:25 crc kubenswrapper[4909]: I1128 17:05:25.902039 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:05:25 crc kubenswrapper[4909]: E1128 17:05:25.903216 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:05:36 crc kubenswrapper[4909]: I1128 17:05:36.902281 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:05:36 crc kubenswrapper[4909]: E1128 17:05:36.903710 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:05:50 crc kubenswrapper[4909]: I1128 17:05:50.902166 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:05:50 crc kubenswrapper[4909]: E1128 17:05:50.902930 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:06:01 crc kubenswrapper[4909]: I1128 17:06:01.902213 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:06:01 crc kubenswrapper[4909]: E1128 17:06:01.903226 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.544575 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545637 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="extract-utilities"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545683 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="extract-utilities"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545708 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545724 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545746 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="extract-content"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545759 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="extract-content"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545780 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="extract-content"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545792 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="extract-content"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545834 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="extract-utilities"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545846 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="extract-utilities"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.545872 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.545886 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.546165 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="955e1ad2-93a4-4648-891e-afbcbba2bac8" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.546196 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="612bcedf-28c4-4a4c-81c5-13f5e4478a8b" containerName="registry-server"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.547995 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.558181 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.633182 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.633253 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.633386 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksjwp\" (UniqueName: \"kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.734279 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.734354 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.734421 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksjwp\" (UniqueName: \"kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.735137 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.735366 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.758765 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksjwp\" (UniqueName: \"kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp\") pod \"certified-operators-qz4pw\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") " pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.868270 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:13 crc kubenswrapper[4909]: I1128 17:06:13.902067 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:06:13 crc kubenswrapper[4909]: E1128 17:06:13.902502 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:06:14 crc kubenswrapper[4909]: I1128 17:06:14.382477 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:14 crc kubenswrapper[4909]: I1128 17:06:14.742609 4909 generic.go:334] "Generic (PLEG): container finished" podID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerID="9dae1507c9de3dfb659f831c63bd6a31f44e69ecf4fe6407497afa337f90e028" exitCode=0
Nov 28 17:06:14 crc kubenswrapper[4909]: I1128 17:06:14.742700 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerDied","Data":"9dae1507c9de3dfb659f831c63bd6a31f44e69ecf4fe6407497afa337f90e028"}
Nov 28 17:06:14 crc kubenswrapper[4909]: I1128 17:06:14.743119 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerStarted","Data":"5b4778be7e57507e8a1fb957666e870a4e3bd66fbf2e24a25afa7872e881b31b"}
Nov 28 17:06:15 crc kubenswrapper[4909]: I1128 17:06:15.752287 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerStarted","Data":"71fb5cf78993c101c638a3ae7f052faec659357af1a2744a55b0423023c7db5d"}
Nov 28 17:06:16 crc kubenswrapper[4909]: I1128 17:06:16.763763 4909 generic.go:334] "Generic (PLEG): container finished" podID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerID="71fb5cf78993c101c638a3ae7f052faec659357af1a2744a55b0423023c7db5d" exitCode=0
Nov 28 17:06:16 crc kubenswrapper[4909]: I1128 17:06:16.763834 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerDied","Data":"71fb5cf78993c101c638a3ae7f052faec659357af1a2744a55b0423023c7db5d"}
Nov 28 17:06:17 crc kubenswrapper[4909]: I1128 17:06:17.778033 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerStarted","Data":"0a5d5d7072bd29f88ed5727500ef6a9d3226d439844b9115b7f1dcd3ff9fab6a"}
Nov 28 17:06:17 crc kubenswrapper[4909]: I1128 17:06:17.802088 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qz4pw" podStartSLOduration=2.096161116 podStartE2EDuration="4.802070909s" podCreationTimestamp="2025-11-28 17:06:13 +0000 UTC" firstStartedPulling="2025-11-28 17:06:14.74545124 +0000 UTC m=+3357.142135764" lastFinishedPulling="2025-11-28 17:06:17.451361023 +0000 UTC m=+3359.848045557" observedRunningTime="2025-11-28 17:06:17.79803302 +0000 UTC m=+3360.194717594" watchObservedRunningTime="2025-11-28 17:06:17.802070909 +0000 UTC m=+3360.198755433"
Nov 28 17:06:23 crc kubenswrapper[4909]: I1128 17:06:23.869433 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:23 crc kubenswrapper[4909]: I1128 17:06:23.869902 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:23 crc kubenswrapper[4909]: I1128 17:06:23.944784 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:24 crc kubenswrapper[4909]: I1128 17:06:24.926897 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:24 crc kubenswrapper[4909]: I1128 17:06:24.994698 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:26 crc kubenswrapper[4909]: I1128 17:06:26.877473 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qz4pw" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="registry-server" containerID="cri-o://0a5d5d7072bd29f88ed5727500ef6a9d3226d439844b9115b7f1dcd3ff9fab6a" gracePeriod=2
Nov 28 17:06:28 crc kubenswrapper[4909]: I1128 17:06:28.902510 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:06:28 crc kubenswrapper[4909]: E1128 17:06:28.903443 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:06:28 crc kubenswrapper[4909]: I1128 17:06:28.914010 4909 generic.go:334] "Generic (PLEG): container finished" podID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerID="0a5d5d7072bd29f88ed5727500ef6a9d3226d439844b9115b7f1dcd3ff9fab6a" exitCode=0
Nov 28 17:06:28 crc kubenswrapper[4909]: I1128 17:06:28.914070 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerDied","Data":"0a5d5d7072bd29f88ed5727500ef6a9d3226d439844b9115b7f1dcd3ff9fab6a"}
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.265568 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.389030 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities\") pod \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") "
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.389232 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content\") pod \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") "
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.389332 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksjwp\" (UniqueName: \"kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp\") pod \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\" (UID: \"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5\") "
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.391062 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities" (OuterVolumeSpecName: "utilities") pod "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" (UID: "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.397757 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp" (OuterVolumeSpecName: "kube-api-access-ksjwp") pod "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" (UID: "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5"). InnerVolumeSpecName "kube-api-access-ksjwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.454392 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" (UID: "0dfe4b20-d44b-4a87-95db-cabd6fa36fa5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.490995 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.491299 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksjwp\" (UniqueName: \"kubernetes.io/projected/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-kube-api-access-ksjwp\") on node \"crc\" DevicePath \"\""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.491423 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.924895 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qz4pw" event={"ID":"0dfe4b20-d44b-4a87-95db-cabd6fa36fa5","Type":"ContainerDied","Data":"5b4778be7e57507e8a1fb957666e870a4e3bd66fbf2e24a25afa7872e881b31b"}
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.924997 4909 scope.go:117] "RemoveContainer" containerID="0a5d5d7072bd29f88ed5727500ef6a9d3226d439844b9115b7f1dcd3ff9fab6a"
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.924999 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qz4pw"
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.962048 4909 scope.go:117] "RemoveContainer" containerID="71fb5cf78993c101c638a3ae7f052faec659357af1a2744a55b0423023c7db5d"
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.967887 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:29 crc kubenswrapper[4909]: I1128 17:06:29.978053 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qz4pw"]
Nov 28 17:06:30 crc kubenswrapper[4909]: I1128 17:06:30.001564 4909 scope.go:117] "RemoveContainer" containerID="9dae1507c9de3dfb659f831c63bd6a31f44e69ecf4fe6407497afa337f90e028"
Nov 28 17:06:31 crc kubenswrapper[4909]: I1128 17:06:31.918337 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" path="/var/lib/kubelet/pods/0dfe4b20-d44b-4a87-95db-cabd6fa36fa5/volumes"
Nov 28 17:06:41 crc kubenswrapper[4909]: I1128 17:06:41.902643 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:06:41 crc kubenswrapper[4909]: E1128 17:06:41.903599 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:06:55 crc kubenswrapper[4909]: I1128 17:06:55.902194 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:06:55 crc kubenswrapper[4909]: E1128 17:06:55.903291 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:07:07 crc kubenswrapper[4909]: I1128 17:07:07.907942 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:07:07 crc kubenswrapper[4909]: E1128 17:07:07.908860 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:07:21 crc kubenswrapper[4909]: I1128 17:07:21.902072 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:07:22 crc kubenswrapper[4909]: I1128 17:07:22.403183 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20"}
Nov 28 17:09:49 crc kubenswrapper[4909]: I1128 17:09:49.911475 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:09:49 crc kubenswrapper[4909]: I1128 17:09:49.912106 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:10:19 crc kubenswrapper[4909]: I1128 17:10:19.910629 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:10:19 crc kubenswrapper[4909]: I1128 17:10:19.911407 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:10:49 crc kubenswrapper[4909]: I1128 17:10:49.910894 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:10:49 crc kubenswrapper[4909]: I1128 17:10:49.911571 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:10:49 crc kubenswrapper[4909]: I1128 17:10:49.918700 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 17:10:49 crc kubenswrapper[4909]: I1128 17:10:49.919604 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 17:10:49 crc kubenswrapper[4909]: I1128 17:10:49.919749 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20" gracePeriod=600
Nov 28 17:10:50 crc kubenswrapper[4909]: I1128 17:10:50.288975 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20" exitCode=0
Nov 28 17:10:50 crc kubenswrapper[4909]: I1128 17:10:50.289693 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20"}
Nov 28 17:10:50 crc kubenswrapper[4909]: I1128 17:10:50.289842 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6"}
Nov 28 17:10:50 crc kubenswrapper[4909]: I1128 17:10:50.289969 4909 scope.go:117] "RemoveContainer" containerID="9b1b15834604c6d55924bd307fea12db73306780564a02880884938e0ae6d2bd"
Nov 28 17:13:19 crc kubenswrapper[4909]: I1128 17:13:19.911427 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:13:19 crc kubenswrapper[4909]: I1128 17:13:19.912326 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:13:49 crc kubenswrapper[4909]: I1128 17:13:49.910556 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:13:49 crc kubenswrapper[4909]: I1128 17:13:49.911204 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.107765 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"]
Nov 28 17:13:53 crc kubenswrapper[4909]: E1128 17:13:53.108033 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="extract-content"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.108045 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="extract-content"
Nov 28 17:13:53 crc kubenswrapper[4909]: E1128 17:13:53.108058 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="extract-utilities"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.108064 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="extract-utilities"
Nov 28 17:13:53 crc kubenswrapper[4909]: E1128 17:13:53.108076 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="registry-server"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.108083 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="registry-server"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.108205 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dfe4b20-d44b-4a87-95db-cabd6fa36fa5" containerName="registry-server"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.109115 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.127260 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"]
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.207430 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.207500 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.207726 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t42zn\" (UniqueName: \"kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.308590 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.308890 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t42zn\" (UniqueName: \"kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.308938 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.309081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.309315 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.327113 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t42zn\" (UniqueName: \"kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn\") pod \"redhat-marketplace-z75kp\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") " pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.431940 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.878282 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"]
Nov 28 17:13:53 crc kubenswrapper[4909]: I1128 17:13:53.957184 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerStarted","Data":"2aeb737edeee4ff6a4ea8d531b1eefd4a4b00bd7999712053c63a8221d673852"}
Nov 28 17:13:54 crc kubenswrapper[4909]: I1128 17:13:54.967481 4909 generic.go:334] "Generic (PLEG): container finished" podID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerID="0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739" exitCode=0
Nov 28 17:13:54 crc kubenswrapper[4909]: I1128 17:13:54.967595 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerDied","Data":"0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739"}
Nov 28 17:13:54 crc kubenswrapper[4909]: I1128 17:13:54.974097 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 17:13:56 crc kubenswrapper[4909]: I1128 17:13:56.987222 4909 generic.go:334] "Generic (PLEG): container finished" podID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerID="8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1" exitCode=0
Nov 28 17:13:56 crc kubenswrapper[4909]: I1128 17:13:56.987281 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerDied","Data":"8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1"}
Nov 28 17:13:57 crc kubenswrapper[4909]: I1128 17:13:57.995730 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerStarted","Data":"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797"}
Nov 28 17:13:58 crc kubenswrapper[4909]: I1128 17:13:58.015778 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z75kp" podStartSLOduration=2.538591467 podStartE2EDuration="5.015757004s" podCreationTimestamp="2025-11-28 17:13:53 +0000 UTC" firstStartedPulling="2025-11-28 17:13:54.973846377 +0000 UTC m=+3817.370530901" lastFinishedPulling="2025-11-28 17:13:57.451011904 +0000 UTC m=+3819.847696438" observedRunningTime="2025-11-28 17:13:58.010875824 +0000 UTC m=+3820.407560358" watchObservedRunningTime="2025-11-28 17:13:58.015757004 +0000 UTC m=+3820.412441558"
Nov 28 17:14:03 crc kubenswrapper[4909]: I1128 17:14:03.433333 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:14:03 crc kubenswrapper[4909]: I1128 17:14:03.433739 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:14:03 crc kubenswrapper[4909]: I1128 17:14:03.493995 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:14:04 crc kubenswrapper[4909]: I1128 17:14:04.121363 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:14:04 crc kubenswrapper[4909]: I1128 17:14:04.193323 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"]
Nov 28 17:14:06 crc kubenswrapper[4909]: I1128 17:14:06.066251 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z75kp" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="registry-server" containerID="cri-o://bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797" gracePeriod=2
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.600869 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z75kp"
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.615236 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content\") pod \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") "
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.615347 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t42zn\" (UniqueName: \"kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn\") pod \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") "
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.615403 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities\") pod \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\" (UID: \"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b\") "
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.620359 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities" (OuterVolumeSpecName: "utilities") pod "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" (UID: "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.621208 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn" (OuterVolumeSpecName: "kube-api-access-t42zn") pod "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" (UID: "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b"). InnerVolumeSpecName "kube-api-access-t42zn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.653750 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" (UID: "4efd652c-f6cd-4216-8d5a-4b1e43dfc12b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.716893 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t42zn\" (UniqueName: \"kubernetes.io/projected/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-kube-api-access-t42zn\") on node \"crc\" DevicePath \"\""
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.716941 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:14:07 crc kubenswrapper[4909]: I1128 17:14:07.716955 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.086266 4909 generic.go:334] "Generic (PLEG): container finished" podID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerID="bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797" exitCode=0
Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.086308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerDied","Data":"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797"}
Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.086334 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z75kp" event={"ID":"4efd652c-f6cd-4216-8d5a-4b1e43dfc12b","Type":"ContainerDied","Data":"2aeb737edeee4ff6a4ea8d531b1eefd4a4b00bd7999712053c63a8221d673852"}
Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.086348 4909 scope.go:117] "RemoveContainer" containerID="bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797"
Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.086481 4909 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z75kp" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.110499 4909 scope.go:117] "RemoveContainer" containerID="8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.111327 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"] Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.127231 4909 scope.go:117] "RemoveContainer" containerID="0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.135719 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z75kp"] Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.167366 4909 scope.go:117] "RemoveContainer" containerID="bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797" Nov 28 17:14:08 crc kubenswrapper[4909]: E1128 17:14:08.167903 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797\": container with ID starting with bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797 not found: ID does not exist" containerID="bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.167971 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797"} err="failed to get container status \"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797\": rpc error: code = NotFound desc = could not find container \"bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797\": container with ID starting with bae5f271770e0548990a629c5893f0f64f08d8a25d2be55822a65628cc8a0797 not found: ID does not exist" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.168004 4909 scope.go:117] "RemoveContainer" containerID="8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1" Nov 28 17:14:08 crc kubenswrapper[4909]: E1128 17:14:08.168408 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1\": container with ID starting with 8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1 not found: ID does not exist" containerID="8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.168437 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1"} err="failed to get container status \"8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1\": rpc error: code = NotFound desc = could not find container \"8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1\": container with ID starting with 8036161a00f729cebbba7cb41f8c9002d88d91ddb335a4575ef38e3e2fb538c1 not found: ID does not exist" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.168458 4909 scope.go:117] "RemoveContainer" containerID="0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739" Nov 28 17:14:08 crc kubenswrapper[4909]: E1128 17:14:08.169133 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739\": container with ID starting with 0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739 not found: ID does not exist" containerID="0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739" Nov 28 17:14:08 crc kubenswrapper[4909]: I1128 17:14:08.169162 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739"} err="failed to get container status \"0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739\": rpc error: code = NotFound desc = could not find container \"0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739\": container with ID starting with 0fe42b3c6a783f60e8238c2f679d45bf35725d21eb33bbb64c303314ccc7e739 not found: ID does not exist" Nov 28 17:14:09 crc kubenswrapper[4909]: I1128 17:14:09.912753 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" path="/var/lib/kubelet/pods/4efd652c-f6cd-4216-8d5a-4b1e43dfc12b/volumes" Nov 28 17:14:19 crc kubenswrapper[4909]: I1128 17:14:19.910616 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:14:19 crc kubenswrapper[4909]: I1128 17:14:19.911242 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:14:19 crc kubenswrapper[4909]: I1128 17:14:19.911864 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:14:19 crc kubenswrapper[4909]: I1128 17:14:19.912959 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:14:19 crc kubenswrapper[4909]: I1128 17:14:19.913032 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" gracePeriod=600 Nov 28 17:14:20 crc kubenswrapper[4909]: E1128 17:14:20.033487 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:14:20 crc kubenswrapper[4909]: I1128 17:14:20.196855 4909 generic.go:334] 
"Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" exitCode=0 Nov 28 17:14:20 crc kubenswrapper[4909]: I1128 17:14:20.196916 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6"} Nov 28 17:14:20 crc kubenswrapper[4909]: I1128 17:14:20.196956 4909 scope.go:117] "RemoveContainer" containerID="d2da137404d42f3ab8a57a2117d7cd94a426eddd9214bd773bcbc46208fc5f20" Nov 28 17:14:20 crc kubenswrapper[4909]: I1128 17:14:20.197747 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:14:20 crc kubenswrapper[4909]: E1128 17:14:20.198176 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:14:33 crc kubenswrapper[4909]: I1128 17:14:33.901231 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:14:33 crc kubenswrapper[4909]: E1128 17:14:33.902040 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:14:47 crc kubenswrapper[4909]: I1128 17:14:47.908135 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:14:47 crc kubenswrapper[4909]: E1128 17:14:47.909126 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.171017 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:14:48 crc kubenswrapper[4909]: E1128 17:14:48.171587 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="extract-content" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.171619 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="extract-content" Nov 28 17:14:48 crc kubenswrapper[4909]: E1128 17:14:48.171644 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="extract-utilities" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.171698 4909 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="extract-utilities" Nov 28 17:14:48 crc kubenswrapper[4909]: E1128 17:14:48.171770 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="registry-server" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.171789 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="registry-server" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.172155 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4efd652c-f6cd-4216-8d5a-4b1e43dfc12b" containerName="registry-server" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.176533 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.188368 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.362725 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.363704 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpp7j\" (UniqueName: \"kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.363903 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.465491 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpp7j\" (UniqueName: \"kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.465599 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.465769 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.466625 
4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.466735 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.575830 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpp7j\" (UniqueName: \"kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j\") pod \"redhat-operators-b7rzb\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:48 crc kubenswrapper[4909]: I1128 17:14:48.856173 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:49 crc kubenswrapper[4909]: I1128 17:14:49.096710 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:14:49 crc kubenswrapper[4909]: I1128 17:14:49.468882 4909 generic.go:334] "Generic (PLEG): container finished" podID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerID="d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4" exitCode=0 Nov 28 17:14:49 crc kubenswrapper[4909]: I1128 17:14:49.469184 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerDied","Data":"d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4"} Nov 28 17:14:49 crc kubenswrapper[4909]: I1128 17:14:49.469282 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerStarted","Data":"3b7165c86d085d6e7f3e1bc0fe4864240e3c66cb419c222b6868915c9280ca65"} Nov 28 17:14:51 crc kubenswrapper[4909]: I1128 17:14:51.498789 4909 generic.go:334] "Generic (PLEG): container finished" podID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerID="19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286" exitCode=0 Nov 28 17:14:51 crc kubenswrapper[4909]: I1128 17:14:51.499584 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerDied","Data":"19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286"} Nov 28 17:14:53 crc kubenswrapper[4909]: I1128 17:14:53.518783 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerStarted","Data":"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95"} Nov 28 17:14:58 crc kubenswrapper[4909]: I1128 17:14:58.856911 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:58 crc kubenswrapper[4909]: I1128 17:14:58.857606 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:14:59 crc kubenswrapper[4909]: I1128 17:14:59.915918 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b7rzb" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="registry-server" probeResult="failure" output=< Nov 28 17:14:59 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 17:14:59 crc kubenswrapper[4909]: > Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.200508 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b7rzb" podStartSLOduration=9.324301193 podStartE2EDuration="12.200488589s" podCreationTimestamp="2025-11-28 17:14:48 +0000 UTC" firstStartedPulling="2025-11-28 17:14:49.470533113 +0000 UTC m=+3871.867217637" lastFinishedPulling="2025-11-28 17:14:52.346720499 +0000 UTC m=+3874.743405033" observedRunningTime="2025-11-28 17:14:53.536759101 +0000 UTC m=+3875.933443645" watchObservedRunningTime="2025-11-28 17:15:00.200488589 +0000 UTC m=+3882.597173123" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.205530 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk"] Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.206639 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.210421 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.210449 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.217008 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk"] Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.350646 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq8hc\" (UniqueName: \"kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.350966 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.351106 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.453793 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-nq8hc\" (UniqueName: \"kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.453944 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.454009 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.455494 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.464314 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.491826 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq8hc\" (UniqueName: \"kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc\") pod \"collect-profiles-29405835-8jtkk\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:00 crc kubenswrapper[4909]: I1128 17:15:00.536898 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:01 crc kubenswrapper[4909]: I1128 17:15:01.002195 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk"] Nov 28 17:15:01 crc kubenswrapper[4909]: I1128 17:15:01.608885 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" event={"ID":"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f","Type":"ContainerStarted","Data":"061a710b781e8adc878490241bf71b2dbed1c3d6c77104c2da266b2310e248ba"} Nov 28 17:15:02 crc kubenswrapper[4909]: I1128 17:15:02.901929 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:15:02 crc kubenswrapper[4909]: E1128 17:15:02.902929 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:15:03 crc kubenswrapper[4909]: I1128 17:15:03.624992 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" event={"ID":"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f","Type":"ContainerStarted","Data":"638693064e9a609ee3d18c4a13259d60f77284b9b9f8f977ac1a8aa09ca862dd"} Nov 28 17:15:03 crc kubenswrapper[4909]: I1128 17:15:03.649372 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" podStartSLOduration=3.649355089 podStartE2EDuration="3.649355089s" podCreationTimestamp="2025-11-28 17:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:15:03.643215286 +0000 UTC m=+3886.039899820" watchObservedRunningTime="2025-11-28 17:15:03.649355089 +0000 UTC m=+3886.046039603" Nov 28 17:15:04 crc kubenswrapper[4909]: I1128 17:15:04.632116 4909 generic.go:334] "Generic (PLEG): container finished" podID="2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" containerID="638693064e9a609ee3d18c4a13259d60f77284b9b9f8f977ac1a8aa09ca862dd" exitCode=0 Nov 28 17:15:04 crc kubenswrapper[4909]: I1128 17:15:04.632166 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" event={"ID":"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f","Type":"ContainerDied","Data":"638693064e9a609ee3d18c4a13259d60f77284b9b9f8f977ac1a8aa09ca862dd"} Nov 28 17:15:05 crc kubenswrapper[4909]: I1128 17:15:05.978063 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.141497 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume\") pod \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.141817 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nq8hc\" (UniqueName: \"kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc\") pod \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.142264 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume" (OuterVolumeSpecName: "config-volume") pod "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" (UID: "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.142905 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume\") pod \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\" (UID: \"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f\") " Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.143321 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.148452 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" (UID: "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.148736 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc" (OuterVolumeSpecName: "kube-api-access-nq8hc") pod "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" (UID: "2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f"). InnerVolumeSpecName "kube-api-access-nq8hc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.245871 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.245964 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nq8hc\" (UniqueName: \"kubernetes.io/projected/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f-kube-api-access-nq8hc\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.654868 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" event={"ID":"2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f","Type":"ContainerDied","Data":"061a710b781e8adc878490241bf71b2dbed1c3d6c77104c2da266b2310e248ba"} Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.654921 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="061a710b781e8adc878490241bf71b2dbed1c3d6c77104c2da266b2310e248ba" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.654951 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk" Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.729709 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"] Nov 28 17:15:06 crc kubenswrapper[4909]: I1128 17:15:06.738494 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d6zq2"] Nov 28 17:15:07 crc kubenswrapper[4909]: I1128 17:15:07.916884 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e9cbe0d-d1db-420f-8a4c-1e24d559dba1" path="/var/lib/kubelet/pods/8e9cbe0d-d1db-420f-8a4c-1e24d559dba1/volumes" Nov 28 17:15:08 crc kubenswrapper[4909]: I1128 17:15:08.929491 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:15:09 crc kubenswrapper[4909]: I1128 17:15:09.006430 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:15:09 crc kubenswrapper[4909]: I1128 17:15:09.176834 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:15:10 crc kubenswrapper[4909]: I1128 17:15:10.686883 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b7rzb" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="registry-server" containerID="cri-o://43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95" gracePeriod=2 Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.117753 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.217268 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities\") pod \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.217374 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content\") pod \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.217400 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpp7j\" (UniqueName: \"kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j\") pod \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\" (UID: \"a9a06d13-d59b-4d0b-9d94-563cf81850a5\") " Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.218266 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities" (OuterVolumeSpecName: "utilities") pod "a9a06d13-d59b-4d0b-9d94-563cf81850a5" (UID: "a9a06d13-d59b-4d0b-9d94-563cf81850a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.225928 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j" (OuterVolumeSpecName: "kube-api-access-gpp7j") pod "a9a06d13-d59b-4d0b-9d94-563cf81850a5" (UID: "a9a06d13-d59b-4d0b-9d94-563cf81850a5"). InnerVolumeSpecName "kube-api-access-gpp7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.319014 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpp7j\" (UniqueName: \"kubernetes.io/projected/a9a06d13-d59b-4d0b-9d94-563cf81850a5-kube-api-access-gpp7j\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.319056 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.324134 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9a06d13-d59b-4d0b-9d94-563cf81850a5" (UID: "a9a06d13-d59b-4d0b-9d94-563cf81850a5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.419946 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a06d13-d59b-4d0b-9d94-563cf81850a5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.695686 4909 generic.go:334] "Generic (PLEG): container finished" podID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerID="43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95" exitCode=0 Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.695740 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerDied","Data":"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95"} Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.695780 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b7rzb" event={"ID":"a9a06d13-d59b-4d0b-9d94-563cf81850a5","Type":"ContainerDied","Data":"3b7165c86d085d6e7f3e1bc0fe4864240e3c66cb419c222b6868915c9280ca65"} Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.695806 4909 scope.go:117] "RemoveContainer" containerID="43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.695811 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b7rzb" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.716581 4909 scope.go:117] "RemoveContainer" containerID="19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.730674 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.744205 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b7rzb"] Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.746741 4909 scope.go:117] "RemoveContainer" containerID="d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.796385 4909 scope.go:117] "RemoveContainer" containerID="43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95" Nov 28 17:15:11 crc kubenswrapper[4909]: E1128 17:15:11.796918 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95\": container with ID starting with 43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95 not found: ID does not exist" containerID="43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.796952 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95"} err="failed to get container status \"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95\": rpc error: code = NotFound desc = could not find container \"43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95\": container with ID starting with 43af5db7fd10f0818694b33a9380ceca73d0643859349a53c05e5a6c88b74c95 not found: ID does not exist" Nov 28 17:15:11 crc 
kubenswrapper[4909]: I1128 17:15:11.796978 4909 scope.go:117] "RemoveContainer" containerID="19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286" Nov 28 17:15:11 crc kubenswrapper[4909]: E1128 17:15:11.797238 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286\": container with ID starting with 19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286 not found: ID does not exist" containerID="19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.797268 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286"} err="failed to get container status \"19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286\": rpc error: code = NotFound desc = could not find container \"19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286\": container with ID starting with 19b17645b980780112784e4fd74695ec6a3835a8c84e8d142752a4b8634cf286 not found: ID does not exist" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.797287 4909 scope.go:117] "RemoveContainer" containerID="d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4" Nov 28 17:15:11 crc kubenswrapper[4909]: E1128 17:15:11.797533 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4\": container with ID starting with d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4 not found: ID does not exist" containerID="d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.797559 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4"} err="failed to get container status \"d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4\": rpc error: code = NotFound desc = could not find container \"d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4\": container with ID starting with d46c6ff42ede972493e55070f67637d8aa0716a9c1e7e8852e60a590793595d4 not found: ID does not exist" Nov 28 17:15:11 crc kubenswrapper[4909]: I1128 17:15:11.910648 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" path="/var/lib/kubelet/pods/a9a06d13-d59b-4d0b-9d94-563cf81850a5/volumes" Nov 28 17:15:15 crc kubenswrapper[4909]: I1128 17:15:15.901884 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:15:15 crc kubenswrapper[4909]: E1128 17:15:15.902599 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.295322 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 
17:15:26 crc kubenswrapper[4909]: E1128 17:15:26.296338 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="extract-utilities" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296353 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="extract-utilities" Nov 28 17:15:26 crc kubenswrapper[4909]: E1128 17:15:26.296368 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="registry-server" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296376 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="registry-server" Nov 28 17:15:26 crc kubenswrapper[4909]: E1128 17:15:26.296388 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="extract-content" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296397 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="extract-content" Nov 28 17:15:26 crc kubenswrapper[4909]: E1128 17:15:26.296415 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" containerName="collect-profiles" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296423 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" containerName="collect-profiles" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296591 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9a06d13-d59b-4d0b-9d94-563cf81850a5" containerName="registry-server" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.296615 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" containerName="collect-profiles" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.298796 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.312563 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.338119 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.338207 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.338226 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwpcb\" (UniqueName: \"kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.438861 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.438901 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwpcb\" (UniqueName: \"kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.438951 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.439382 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.439505 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.481179 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cwpcb\" (UniqueName: \"kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb\") pod \"community-operators-t5d7s\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.618006 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:26 crc kubenswrapper[4909]: I1128 17:15:26.902961 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 17:15:27 crc kubenswrapper[4909]: I1128 17:15:27.818823 4909 generic.go:334] "Generic (PLEG): container finished" podID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerID="915c88c421020e3ba6ae4fc89443160286651650d1f0a0ad653c208c921a031b" exitCode=0 Nov 28 17:15:27 crc kubenswrapper[4909]: I1128 17:15:27.818900 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerDied","Data":"915c88c421020e3ba6ae4fc89443160286651650d1f0a0ad653c208c921a031b"} Nov 28 17:15:27 crc kubenswrapper[4909]: I1128 17:15:27.819183 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerStarted","Data":"929025b286afceda0b9533ea4afd9a15bb1322132379e0c2c5b7483026ce85d3"} Nov 28 17:15:29 crc kubenswrapper[4909]: I1128 17:15:29.837272 4909 generic.go:334] "Generic (PLEG): container finished" podID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerID="6c7752f669f406c44251c6218a83b764af2388894aa5bc10015638e84d60a284" exitCode=0 Nov 28 17:15:29 crc kubenswrapper[4909]: I1128 17:15:29.837450 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerDied","Data":"6c7752f669f406c44251c6218a83b764af2388894aa5bc10015638e84d60a284"} Nov 28 17:15:29 crc kubenswrapper[4909]: I1128 17:15:29.901431 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:15:29 crc kubenswrapper[4909]: E1128 17:15:29.901702 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:15:30 crc kubenswrapper[4909]: I1128 17:15:30.624268 4909 scope.go:117] "RemoveContainer" containerID="4ed4598a6df4a267eb842fe650e4d800b1e2e1739b94711a922bf86bb4b17b33" Nov 28 17:15:30 crc kubenswrapper[4909]: I1128 17:15:30.847732 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerStarted","Data":"d0a16b5c680b36f5a7848410ce2115b34180188dbec26d366dbd9a1360c99d00"} Nov 28 17:15:30 crc kubenswrapper[4909]: I1128 17:15:30.865330 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t5d7s" podStartSLOduration=2.365806971 
podStartE2EDuration="4.865308442s" podCreationTimestamp="2025-11-28 17:15:26 +0000 UTC" firstStartedPulling="2025-11-28 17:15:27.821112064 +0000 UTC m=+3910.217796598" lastFinishedPulling="2025-11-28 17:15:30.320613545 +0000 UTC m=+3912.717298069" observedRunningTime="2025-11-28 17:15:30.862892658 +0000 UTC m=+3913.259577192" watchObservedRunningTime="2025-11-28 17:15:30.865308442 +0000 UTC m=+3913.261992966" Nov 28 17:15:36 crc kubenswrapper[4909]: I1128 17:15:36.619047 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:36 crc kubenswrapper[4909]: I1128 17:15:36.619998 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:37 crc kubenswrapper[4909]: I1128 17:15:37.041045 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:37 crc kubenswrapper[4909]: I1128 17:15:37.154209 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:37 crc kubenswrapper[4909]: I1128 17:15:37.314729 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 17:15:38 crc kubenswrapper[4909]: I1128 17:15:38.910995 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t5d7s" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="registry-server" containerID="cri-o://d0a16b5c680b36f5a7848410ce2115b34180188dbec26d366dbd9a1360c99d00" gracePeriod=2 Nov 28 17:15:40 crc kubenswrapper[4909]: I1128 17:15:40.902320 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:15:40 crc kubenswrapper[4909]: E1128 17:15:40.903025 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:15:40 crc kubenswrapper[4909]: I1128 17:15:40.927738 4909 generic.go:334] "Generic (PLEG): container finished" podID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerID="d0a16b5c680b36f5a7848410ce2115b34180188dbec26d366dbd9a1360c99d00" exitCode=0 Nov 28 17:15:40 crc kubenswrapper[4909]: I1128 17:15:40.927781 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerDied","Data":"d0a16b5c680b36f5a7848410ce2115b34180188dbec26d366dbd9a1360c99d00"} Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.222151 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.372242 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content\") pod \"973f08fa-a04a-40e0-b661-ce78f1ea6433\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.372347 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwpcb\" (UniqueName: \"kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb\") pod \"973f08fa-a04a-40e0-b661-ce78f1ea6433\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.372428 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities\") pod \"973f08fa-a04a-40e0-b661-ce78f1ea6433\" (UID: \"973f08fa-a04a-40e0-b661-ce78f1ea6433\") " Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.373561 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities" (OuterVolumeSpecName: "utilities") pod "973f08fa-a04a-40e0-b661-ce78f1ea6433" (UID: "973f08fa-a04a-40e0-b661-ce78f1ea6433"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.384916 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb" (OuterVolumeSpecName: "kube-api-access-cwpcb") pod "973f08fa-a04a-40e0-b661-ce78f1ea6433" (UID: "973f08fa-a04a-40e0-b661-ce78f1ea6433"). InnerVolumeSpecName "kube-api-access-cwpcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.424628 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "973f08fa-a04a-40e0-b661-ce78f1ea6433" (UID: "973f08fa-a04a-40e0-b661-ce78f1ea6433"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.473963 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwpcb\" (UniqueName: \"kubernetes.io/projected/973f08fa-a04a-40e0-b661-ce78f1ea6433-kube-api-access-cwpcb\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.474006 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.474019 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/973f08fa-a04a-40e0-b661-ce78f1ea6433-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.935601 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t5d7s" event={"ID":"973f08fa-a04a-40e0-b661-ce78f1ea6433","Type":"ContainerDied","Data":"929025b286afceda0b9533ea4afd9a15bb1322132379e0c2c5b7483026ce85d3"} Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.935683 4909 scope.go:117] "RemoveContainer" containerID="d0a16b5c680b36f5a7848410ce2115b34180188dbec26d366dbd9a1360c99d00" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.935720 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t5d7s" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.954464 4909 scope.go:117] "RemoveContainer" containerID="6c7752f669f406c44251c6218a83b764af2388894aa5bc10015638e84d60a284" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.968732 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.976604 4909 scope.go:117] "RemoveContainer" containerID="915c88c421020e3ba6ae4fc89443160286651650d1f0a0ad653c208c921a031b" Nov 28 17:15:41 crc kubenswrapper[4909]: I1128 17:15:41.978751 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t5d7s"] Nov 28 17:15:43 crc kubenswrapper[4909]: I1128 17:15:43.912136 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" path="/var/lib/kubelet/pods/973f08fa-a04a-40e0-b661-ce78f1ea6433/volumes" Nov 28 17:15:51 crc kubenswrapper[4909]: I1128 17:15:51.902462 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:15:51 crc kubenswrapper[4909]: E1128 17:15:51.903798 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:16:05 crc kubenswrapper[4909]: I1128 17:16:05.901782 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:16:05 crc kubenswrapper[4909]: E1128 17:16:05.902651 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:16:16 crc kubenswrapper[4909]: I1128 17:16:16.901414 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:16:16 crc kubenswrapper[4909]: E1128 17:16:16.902023 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.706301 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:19 crc kubenswrapper[4909]: E1128 17:16:19.707447 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="registry-server" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.707485 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="registry-server" Nov 28 17:16:19 crc kubenswrapper[4909]: E1128 17:16:19.707523 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="extract-content" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.707544 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="extract-content" Nov 28 17:16:19 crc kubenswrapper[4909]: E1128 17:16:19.707815 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="extract-utilities" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.707838 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="extract-utilities" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.708201 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="973f08fa-a04a-40e0-b661-ce78f1ea6433" containerName="registry-server" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.710442 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.715449 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.850381 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v6r8\" (UniqueName: \"kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.850487 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.850750 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.952571 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v6r8\" (UniqueName: \"kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.952644 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.952731 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.953329 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.954010 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:19 crc kubenswrapper[4909]: I1128 17:16:19.981533 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6v6r8\" (UniqueName: \"kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8\") pod \"certified-operators-pnmbg\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:20 crc kubenswrapper[4909]: I1128 17:16:20.029608 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:20 crc kubenswrapper[4909]: I1128 17:16:20.537438 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:21 crc kubenswrapper[4909]: I1128 17:16:21.253383 4909 generic.go:334] "Generic (PLEG): container finished" podID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerID="dcc861103c8b23b7c16a09934e0571d3b8800661a553595fc46f9302ce1aa668" exitCode=0 Nov 28 17:16:21 crc kubenswrapper[4909]: I1128 17:16:21.253455 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerDied","Data":"dcc861103c8b23b7c16a09934e0571d3b8800661a553595fc46f9302ce1aa668"} Nov 28 17:16:21 crc kubenswrapper[4909]: I1128 17:16:21.253954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerStarted","Data":"c871f975ac4865ee9e34a3bac43b410a4b37875a2121766d43c56de617cb3540"} Nov 28 17:16:23 crc kubenswrapper[4909]: I1128 17:16:23.265778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerStarted","Data":"d0c7c6615249366a24c377dd155623f92ac0b729b6c700bfc5b52482ba735ebb"} Nov 28 17:16:24 crc kubenswrapper[4909]: I1128 17:16:24.277609 4909 generic.go:334] "Generic (PLEG): container finished" podID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerID="d0c7c6615249366a24c377dd155623f92ac0b729b6c700bfc5b52482ba735ebb" exitCode=0 Nov 28 17:16:24 crc kubenswrapper[4909]: I1128 17:16:24.277680 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerDied","Data":"d0c7c6615249366a24c377dd155623f92ac0b729b6c700bfc5b52482ba735ebb"} Nov 28 17:16:25 crc kubenswrapper[4909]: I1128 17:16:25.287484 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerStarted","Data":"c356fa80ce664bccbab329e325deb72ca52e0836d41c66b4861c986501bf5398"} Nov 28 17:16:25 crc kubenswrapper[4909]: I1128 17:16:25.311901 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pnmbg" podStartSLOduration=2.6992722909999998 podStartE2EDuration="6.31188131s" podCreationTimestamp="2025-11-28 17:16:19 +0000 UTC" firstStartedPulling="2025-11-28 17:16:21.254968912 +0000 UTC m=+3963.651653436" lastFinishedPulling="2025-11-28 17:16:24.867577921 +0000 UTC m=+3967.264262455" observedRunningTime="2025-11-28 17:16:25.30408001 +0000 UTC m=+3967.700764544" watchObservedRunningTime="2025-11-28 17:16:25.31188131 +0000 UTC m=+3967.708565844" Nov 28 17:16:29 crc kubenswrapper[4909]: I1128 17:16:29.902449 4909 scope.go:117] "RemoveContainer" 
containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:16:29 crc kubenswrapper[4909]: E1128 17:16:29.903099 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:16:30 crc kubenswrapper[4909]: I1128 17:16:30.030428 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:30 crc kubenswrapper[4909]: I1128 17:16:30.030475 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:30 crc kubenswrapper[4909]: I1128 17:16:30.107416 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:30 crc kubenswrapper[4909]: I1128 17:16:30.392074 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:30 crc kubenswrapper[4909]: I1128 17:16:30.462796 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:32 crc kubenswrapper[4909]: I1128 17:16:32.342410 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pnmbg" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="registry-server" containerID="cri-o://c356fa80ce664bccbab329e325deb72ca52e0836d41c66b4861c986501bf5398" gracePeriod=2 Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.357222 4909 generic.go:334] "Generic (PLEG): container finished" podID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerID="c356fa80ce664bccbab329e325deb72ca52e0836d41c66b4861c986501bf5398" exitCode=0 Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.357335 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerDied","Data":"c356fa80ce664bccbab329e325deb72ca52e0836d41c66b4861c986501bf5398"} Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.471099 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.669604 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities\") pod \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.669746 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v6r8\" (UniqueName: \"kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8\") pod \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.669824 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content\") pod \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\" (UID: \"177bfbb3-3a47-4f19-9dfe-93a450884bb1\") " Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.670362 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities" (OuterVolumeSpecName: "utilities") pod "177bfbb3-3a47-4f19-9dfe-93a450884bb1" (UID: "177bfbb3-3a47-4f19-9dfe-93a450884bb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.678498 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8" (OuterVolumeSpecName: "kube-api-access-6v6r8") pod "177bfbb3-3a47-4f19-9dfe-93a450884bb1" (UID: "177bfbb3-3a47-4f19-9dfe-93a450884bb1"). InnerVolumeSpecName "kube-api-access-6v6r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.723164 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "177bfbb3-3a47-4f19-9dfe-93a450884bb1" (UID: "177bfbb3-3a47-4f19-9dfe-93a450884bb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.771606 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.771641 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/177bfbb3-3a47-4f19-9dfe-93a450884bb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:16:33 crc kubenswrapper[4909]: I1128 17:16:33.771651 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v6r8\" (UniqueName: \"kubernetes.io/projected/177bfbb3-3a47-4f19-9dfe-93a450884bb1-kube-api-access-6v6r8\") on node \"crc\" DevicePath \"\"" Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.370365 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pnmbg" event={"ID":"177bfbb3-3a47-4f19-9dfe-93a450884bb1","Type":"ContainerDied","Data":"c871f975ac4865ee9e34a3bac43b410a4b37875a2121766d43c56de617cb3540"} Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.370448 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pnmbg" Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.370798 4909 scope.go:117] "RemoveContainer" containerID="c356fa80ce664bccbab329e325deb72ca52e0836d41c66b4861c986501bf5398" Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.398549 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.403421 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pnmbg"] Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.410784 4909 scope.go:117] "RemoveContainer" containerID="d0c7c6615249366a24c377dd155623f92ac0b729b6c700bfc5b52482ba735ebb" Nov 28 17:16:34 crc kubenswrapper[4909]: I1128 17:16:34.460148 4909 scope.go:117] "RemoveContainer" containerID="dcc861103c8b23b7c16a09934e0571d3b8800661a553595fc46f9302ce1aa668" Nov 28 17:16:35 crc kubenswrapper[4909]: I1128 17:16:35.914055 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" path="/var/lib/kubelet/pods/177bfbb3-3a47-4f19-9dfe-93a450884bb1/volumes" Nov 28 17:16:41 crc kubenswrapper[4909]: I1128 17:16:41.901144 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:16:41 crc kubenswrapper[4909]: E1128 17:16:41.901738 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:16:54 crc kubenswrapper[4909]: I1128 17:16:54.901102 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:16:54 crc kubenswrapper[4909]: E1128 17:16:54.902013 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:17:06 crc kubenswrapper[4909]: I1128 17:17:06.900909 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:17:06 crc kubenswrapper[4909]: E1128 17:17:06.901522 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:17:19 crc kubenswrapper[4909]: I1128 17:17:19.902471 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:17:19 crc kubenswrapper[4909]: E1128 17:17:19.904155 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:17:33 crc kubenswrapper[4909]: I1128 17:17:33.902539 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:17:33 crc kubenswrapper[4909]: E1128 17:17:33.903734 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:17:45 crc kubenswrapper[4909]: I1128 17:17:45.902514 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:17:45 crc kubenswrapper[4909]: E1128 17:17:45.903964 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:17:59 crc kubenswrapper[4909]: I1128 17:17:59.902167 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:17:59 crc kubenswrapper[4909]: E1128 17:17:59.903452 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:18:12 crc kubenswrapper[4909]: I1128 17:18:12.901219 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:18:12 crc kubenswrapper[4909]: E1128 17:18:12.901844 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:18:23 crc kubenswrapper[4909]: I1128 17:18:23.902014 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:18:23 crc kubenswrapper[4909]: E1128 17:18:23.902823 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:18:38 crc kubenswrapper[4909]: I1128 17:18:38.901713 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:18:38 crc kubenswrapper[4909]: E1128 17:18:38.902430 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:18:50 crc kubenswrapper[4909]: I1128 17:18:50.902198 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:18:50 crc kubenswrapper[4909]: E1128 17:18:50.903151 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:19:05 crc kubenswrapper[4909]: I1128 17:19:05.902214 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:19:05 crc kubenswrapper[4909]: E1128 17:19:05.903485 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" 
podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:19:19 crc kubenswrapper[4909]: I1128 17:19:19.901175 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:19:19 crc kubenswrapper[4909]: E1128 17:19:19.902002 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:19:32 crc kubenswrapper[4909]: I1128 17:19:32.902821 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:19:34 crc kubenswrapper[4909]: I1128 17:19:34.078222 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9"} Nov 28 17:21:49 crc kubenswrapper[4909]: I1128 17:21:49.911564 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:21:49 crc kubenswrapper[4909]: I1128 17:21:49.912718 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:22:19 crc kubenswrapper[4909]: I1128 17:22:19.910771 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:22:19 crc kubenswrapper[4909]: I1128 17:22:19.911439 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:22:49 crc kubenswrapper[4909]: I1128 17:22:49.911177 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:22:49 crc kubenswrapper[4909]: I1128 17:22:49.911875 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:22:49 crc kubenswrapper[4909]: I1128 17:22:49.918387 4909 
kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:22:49 crc kubenswrapper[4909]: I1128 17:22:49.922267 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:22:49 crc kubenswrapper[4909]: I1128 17:22:49.922422 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9" gracePeriod=600 Nov 28 17:22:50 crc kubenswrapper[4909]: I1128 17:22:50.422541 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9" exitCode=0 Nov 28 17:22:50 crc kubenswrapper[4909]: I1128 17:22:50.422599 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9"} Nov 28 17:22:50 crc kubenswrapper[4909]: I1128 17:22:50.422937 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"} Nov 28 17:22:50 crc kubenswrapper[4909]: I1128 17:22:50.422964 4909 scope.go:117] "RemoveContainer" containerID="f26ce64ee7189509dfc18cbe7ce86f7f6598eb9f6ae426c5dc846407a726e8c6" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.678502 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:29 crc kubenswrapper[4909]: E1128 17:24:29.679518 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="extract-content" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.679537 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="extract-content" Nov 28 17:24:29 crc kubenswrapper[4909]: E1128 17:24:29.679551 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="extract-utilities" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.679560 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="extract-utilities" Nov 28 17:24:29 crc kubenswrapper[4909]: E1128 17:24:29.679591 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="registry-server" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.679601 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="registry-server" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.679812 4909 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="177bfbb3-3a47-4f19-9dfe-93a450884bb1" containerName="registry-server" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.681133 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.694116 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.768815 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.768911 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnplt\" (UniqueName: \"kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.768946 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.869692 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnplt\" (UniqueName: \"kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.870001 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.870168 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.870904 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.871064 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content\") pod \"redhat-marketplace-hb79l\" (UID: 
\"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:29 crc kubenswrapper[4909]: I1128 17:24:29.893483 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnplt\" (UniqueName: \"kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt\") pod \"redhat-marketplace-hb79l\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:30 crc kubenswrapper[4909]: I1128 17:24:30.007152 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:30 crc kubenswrapper[4909]: I1128 17:24:30.447311 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:30 crc kubenswrapper[4909]: W1128 17:24:30.458412 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc08f06d8_2955_49e5_ae08_c7f7e8dff53f.slice/crio-3e7ed97671bcc34547514f8b2151a5830df2deeeb3aa1d5a3059eae461435e98 WatchSource:0}: Error finding container 3e7ed97671bcc34547514f8b2151a5830df2deeeb3aa1d5a3059eae461435e98: Status 404 returned error can't find the container with id 3e7ed97671bcc34547514f8b2151a5830df2deeeb3aa1d5a3059eae461435e98 Nov 28 17:24:31 crc kubenswrapper[4909]: I1128 17:24:31.329452 4909 generic.go:334] "Generic (PLEG): container finished" podID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerID="92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2" exitCode=0 Nov 28 17:24:31 crc kubenswrapper[4909]: I1128 17:24:31.329556 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerDied","Data":"92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2"} Nov 28 17:24:31 crc kubenswrapper[4909]: I1128 17:24:31.329610 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerStarted","Data":"3e7ed97671bcc34547514f8b2151a5830df2deeeb3aa1d5a3059eae461435e98"} Nov 28 17:24:31 crc kubenswrapper[4909]: I1128 17:24:31.333536 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:24:32 crc kubenswrapper[4909]: I1128 17:24:32.340253 4909 generic.go:334] "Generic (PLEG): container finished" podID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerID="b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89" exitCode=0 Nov 28 17:24:32 crc kubenswrapper[4909]: I1128 17:24:32.340323 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerDied","Data":"b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89"} Nov 28 17:24:33 crc kubenswrapper[4909]: I1128 17:24:33.356284 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerStarted","Data":"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb"} Nov 28 17:24:33 crc kubenswrapper[4909]: I1128 17:24:33.396766 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hb79l" 
podStartSLOduration=2.770084733 podStartE2EDuration="4.396729373s" podCreationTimestamp="2025-11-28 17:24:29 +0000 UTC" firstStartedPulling="2025-11-28 17:24:31.333131422 +0000 UTC m=+4453.729815986" lastFinishedPulling="2025-11-28 17:24:32.959776092 +0000 UTC m=+4455.356460626" observedRunningTime="2025-11-28 17:24:33.391898883 +0000 UTC m=+4455.788583447" watchObservedRunningTime="2025-11-28 17:24:33.396729373 +0000 UTC m=+4455.793413927" Nov 28 17:24:40 crc kubenswrapper[4909]: I1128 17:24:40.007934 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:40 crc kubenswrapper[4909]: I1128 17:24:40.008574 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:40 crc kubenswrapper[4909]: I1128 17:24:40.086769 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:40 crc kubenswrapper[4909]: I1128 17:24:40.481228 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:40 crc kubenswrapper[4909]: I1128 17:24:40.526934 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:42 crc kubenswrapper[4909]: I1128 17:24:42.443195 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hb79l" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="registry-server" containerID="cri-o://683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb" gracePeriod=2 Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.333032 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.450390 4909 generic.go:334] "Generic (PLEG): container finished" podID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerID="683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb" exitCode=0 Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.450431 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hb79l" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.450445 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerDied","Data":"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb"} Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.450475 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hb79l" event={"ID":"c08f06d8-2955-49e5-ae08-c7f7e8dff53f","Type":"ContainerDied","Data":"3e7ed97671bcc34547514f8b2151a5830df2deeeb3aa1d5a3059eae461435e98"} Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.450508 4909 scope.go:117] "RemoveContainer" containerID="683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.468841 4909 scope.go:117] "RemoveContainer" containerID="b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.485814 4909 scope.go:117] "RemoveContainer" containerID="92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.489246 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnplt\" (UniqueName: \"kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt\") pod \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.489311 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content\") pod \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.489338 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities\") pod \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\" (UID: \"c08f06d8-2955-49e5-ae08-c7f7e8dff53f\") " Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.490461 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities" (OuterVolumeSpecName: "utilities") pod "c08f06d8-2955-49e5-ae08-c7f7e8dff53f" (UID: "c08f06d8-2955-49e5-ae08-c7f7e8dff53f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.495504 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt" (OuterVolumeSpecName: "kube-api-access-gnplt") pod "c08f06d8-2955-49e5-ae08-c7f7e8dff53f" (UID: "c08f06d8-2955-49e5-ae08-c7f7e8dff53f"). InnerVolumeSpecName "kube-api-access-gnplt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.516300 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c08f06d8-2955-49e5-ae08-c7f7e8dff53f" (UID: "c08f06d8-2955-49e5-ae08-c7f7e8dff53f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.534277 4909 scope.go:117] "RemoveContainer" containerID="683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb" Nov 28 17:24:43 crc kubenswrapper[4909]: E1128 17:24:43.534732 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb\": container with ID starting with 683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb not found: ID does not exist" containerID="683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.534776 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb"} err="failed to get container status \"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb\": rpc error: code = NotFound desc = could not find container \"683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb\": container with ID starting with 683571deba0792ad74fe61be02a34278ca978b53967501401746beafb98883fb not found: ID does not exist" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.534803 4909 scope.go:117] "RemoveContainer" containerID="b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89" Nov 28 17:24:43 crc kubenswrapper[4909]: E1128 17:24:43.535270 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89\": container with ID starting with b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89 not found: ID does not exist" containerID="b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.535325 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89"} err="failed to get container status \"b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89\": rpc error: code = NotFound desc = could not find container \"b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89\": container with ID starting with b0997e79eeec28a9b426e5bdf9c1a451349c3bf0ae3266861da03d7c9cffbc89 not found: ID does not exist" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.535362 4909 scope.go:117] "RemoveContainer" containerID="92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2" Nov 28 17:24:43 crc kubenswrapper[4909]: E1128 17:24:43.535852 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2\": container with ID starting with 92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2 not found: ID does not exist" 
containerID="92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.535890 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2"} err="failed to get container status \"92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2\": rpc error: code = NotFound desc = could not find container \"92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2\": container with ID starting with 92d6e305d6f2d1eea4ced48ebc77aeebbbb3a28245f853565e9c1b2ca39f00c2 not found: ID does not exist" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.591545 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.591615 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.591636 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnplt\" (UniqueName: \"kubernetes.io/projected/c08f06d8-2955-49e5-ae08-c7f7e8dff53f-kube-api-access-gnplt\") on node \"crc\" DevicePath \"\"" Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.789001 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.795939 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hb79l"] Nov 28 17:24:43 crc kubenswrapper[4909]: I1128 17:24:43.918087 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" path="/var/lib/kubelet/pods/c08f06d8-2955-49e5-ae08-c7f7e8dff53f/volumes" Nov 28 17:25:19 crc kubenswrapper[4909]: I1128 17:25:19.911103 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:25:19 crc kubenswrapper[4909]: I1128 17:25:19.911995 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:25:49 crc kubenswrapper[4909]: I1128 17:25:49.910883 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:25:49 crc kubenswrapper[4909]: I1128 17:25:49.912795 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.875506 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bjcbj"] Nov 28 17:26:00 crc kubenswrapper[4909]: E1128 17:26:00.876563 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="registry-server" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.876584 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="registry-server" Nov 28 17:26:00 crc kubenswrapper[4909]: E1128 17:26:00.876594 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="extract-utilities" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.876603 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="extract-utilities" Nov 28 17:26:00 crc kubenswrapper[4909]: E1128 17:26:00.876618 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="extract-content" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.876627 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="extract-content" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.876832 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c08f06d8-2955-49e5-ae08-c7f7e8dff53f" containerName="registry-server" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.891838 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:00 crc kubenswrapper[4909]: I1128 17:26:00.919715 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bjcbj"] Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.077739 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.077841 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsfmb\" (UniqueName: \"kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.077882 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.178693 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsfmb\" (UniqueName: \"kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") 
" pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.178769 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.178840 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.179432 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.179495 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.201096 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsfmb\" (UniqueName: \"kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb\") pod \"community-operators-bjcbj\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.225596 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bjcbj"
Nov 28 17:26:01 crc kubenswrapper[4909]: I1128 17:26:01.699447 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bjcbj"]
Nov 28 17:26:02 crc kubenswrapper[4909]: I1128 17:26:02.149199 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerStarted","Data":"4919b6f695a3b6e84879c9cdbcb9032b09d646fec67e613c2435d0246a7d21a4"}
Nov 28 17:26:03 crc kubenswrapper[4909]: I1128 17:26:03.164938 4909 generic.go:334] "Generic (PLEG): container finished" podID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerID="20e747fadd24e8c905d56d6456922b7dbc2c101bcb8ba14944e93dbb5b6f789c" exitCode=0
Nov 28 17:26:03 crc kubenswrapper[4909]: I1128 17:26:03.165116 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerDied","Data":"20e747fadd24e8c905d56d6456922b7dbc2c101bcb8ba14944e93dbb5b6f789c"}
Nov 28 17:26:05 crc kubenswrapper[4909]: I1128 17:26:05.182071 4909 generic.go:334] "Generic (PLEG): container finished" podID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerID="9cd42997643f623c9fdafe52f2bd598646215c006d26b937d5f5ec70cfb4ee2b" exitCode=0
Nov 28 17:26:05 crc kubenswrapper[4909]: I1128 17:26:05.182145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerDied","Data":"9cd42997643f623c9fdafe52f2bd598646215c006d26b937d5f5ec70cfb4ee2b"}
Nov 28 17:26:06 crc kubenswrapper[4909]: I1128 17:26:06.192576 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerStarted","Data":"270a97c8332fe9b21beedf6521c7cbfe56a27c1602c390f6062160dcb377522e"}
Nov 28 17:26:06 crc kubenswrapper[4909]: I1128 17:26:06.217751 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bjcbj" podStartSLOduration=3.611971878 podStartE2EDuration="6.21772578s" podCreationTimestamp="2025-11-28 17:26:00 +0000 UTC" firstStartedPulling="2025-11-28 17:26:03.166461587 +0000 UTC m=+4545.563146121" lastFinishedPulling="2025-11-28 17:26:05.772215459 +0000 UTC m=+4548.168900023" observedRunningTime="2025-11-28 17:26:06.210926799 +0000 UTC m=+4548.607611343" watchObservedRunningTime="2025-11-28 17:26:06.21772578 +0000 UTC m=+4548.614410314"
Nov 28 17:26:11 crc kubenswrapper[4909]: I1128 17:26:11.225739 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bjcbj"
Nov 28 17:26:11 crc kubenswrapper[4909]: I1128 17:26:11.226029 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bjcbj"
Nov 28 17:26:11 crc kubenswrapper[4909]: I1128 17:26:11.270965 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bjcbj"
Nov 28 17:26:12 crc kubenswrapper[4909]: I1128 17:26:12.311033 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bjcbj"
Nov 28 17:26:12 crc kubenswrapper[4909]: I1128 17:26:12.356717 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bjcbj"]
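--- annotation (editor's note, not part of the captured log) ---
The startup-latency record above is internally consistent: podStartE2EDuration (6.21772578s) runs from podCreationTimestamp (17:26:00) to observedRunningTime (17:26:06.217...), while podStartSLOduration appears to be that figure minus the image-pull window (17:26:03.166 to 17:26:05.772, about 2.606s). A plausible reading of the tracker's arithmetic, reproduced from the record's own monotonic (m=+...) offsets; this recomputes the logged value but is not the tracker's source code:

    package main

    import "fmt"

    func main() {
    	// Monotonic clock offsets (the m=+... values, in seconds) copied
    	// from the pod_startup_latency_tracker record above.
    	firstStartedPulling := 4545.563146121
    	lastFinishedPulling := 4548.168900023
    	podStartE2E := 6.21772578 // observedRunningTime minus podCreationTimestamp

    	pullTime := lastFinishedPulling - firstStartedPulling
    	sloDuration := podStartE2E - pullTime

    	fmt.Printf("image pull took     %.9fs\n", pullTime)    // ~2.605753902s
    	fmt.Printf("podStartSLOduration %.9fs\n", sloDuration) // ~3.611971878s, as logged
    }

--- end annotation ---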
source="api" pods=["openshift-marketplace/community-operators-bjcbj"] Nov 28 17:26:14 crc kubenswrapper[4909]: I1128 17:26:14.272312 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bjcbj" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="registry-server" containerID="cri-o://270a97c8332fe9b21beedf6521c7cbfe56a27c1602c390f6062160dcb377522e" gracePeriod=2 Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.282249 4909 generic.go:334] "Generic (PLEG): container finished" podID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerID="270a97c8332fe9b21beedf6521c7cbfe56a27c1602c390f6062160dcb377522e" exitCode=0 Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.282321 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerDied","Data":"270a97c8332fe9b21beedf6521c7cbfe56a27c1602c390f6062160dcb377522e"} Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.282720 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bjcbj" event={"ID":"c496ff00-1f8b-4677-832c-d3e8bee90e88","Type":"ContainerDied","Data":"4919b6f695a3b6e84879c9cdbcb9032b09d646fec67e613c2435d0246a7d21a4"} Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.282746 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4919b6f695a3b6e84879c9cdbcb9032b09d646fec67e613c2435d0246a7d21a4" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.293305 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.406079 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities\") pod \"c496ff00-1f8b-4677-832c-d3e8bee90e88\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.406202 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsfmb\" (UniqueName: \"kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb\") pod \"c496ff00-1f8b-4677-832c-d3e8bee90e88\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.406236 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content\") pod \"c496ff00-1f8b-4677-832c-d3e8bee90e88\" (UID: \"c496ff00-1f8b-4677-832c-d3e8bee90e88\") " Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.407441 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities" (OuterVolumeSpecName: "utilities") pod "c496ff00-1f8b-4677-832c-d3e8bee90e88" (UID: "c496ff00-1f8b-4677-832c-d3e8bee90e88"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.414476 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb" (OuterVolumeSpecName: "kube-api-access-rsfmb") pod "c496ff00-1f8b-4677-832c-d3e8bee90e88" (UID: "c496ff00-1f8b-4677-832c-d3e8bee90e88"). InnerVolumeSpecName "kube-api-access-rsfmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.486898 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c496ff00-1f8b-4677-832c-d3e8bee90e88" (UID: "c496ff00-1f8b-4677-832c-d3e8bee90e88"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.507617 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.507736 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsfmb\" (UniqueName: \"kubernetes.io/projected/c496ff00-1f8b-4677-832c-d3e8bee90e88-kube-api-access-rsfmb\") on node \"crc\" DevicePath \"\"" Nov 28 17:26:15 crc kubenswrapper[4909]: I1128 17:26:15.507771 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c496ff00-1f8b-4677-832c-d3e8bee90e88-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:26:16 crc kubenswrapper[4909]: I1128 17:26:16.292788 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bjcbj" Nov 28 17:26:16 crc kubenswrapper[4909]: I1128 17:26:16.326523 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bjcbj"] Nov 28 17:26:16 crc kubenswrapper[4909]: I1128 17:26:16.334113 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bjcbj"] Nov 28 17:26:17 crc kubenswrapper[4909]: I1128 17:26:17.915151 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" path="/var/lib/kubelet/pods/c496ff00-1f8b-4677-832c-d3e8bee90e88/volumes" Nov 28 17:26:19 crc kubenswrapper[4909]: I1128 17:26:19.911412 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:26:19 crc kubenswrapper[4909]: I1128 17:26:19.912134 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:26:19 crc kubenswrapper[4909]: I1128 17:26:19.917335 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:26:19 crc kubenswrapper[4909]: I1128 17:26:19.918092 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:26:19 crc kubenswrapper[4909]: I1128 17:26:19.918200 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" gracePeriod=600 Nov 28 17:26:20 crc kubenswrapper[4909]: I1128 17:26:20.333715 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" exitCode=0 Nov 28 17:26:20 crc kubenswrapper[4909]: I1128 17:26:20.333813 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"} Nov 28 17:26:20 crc kubenswrapper[4909]: I1128 17:26:20.333904 4909 scope.go:117] "RemoveContainer" containerID="a816165ed0fb773aab8d328989505a9e2709d8311e49e97ca83a0108c1018ce9" Nov 28 17:26:20 crc kubenswrapper[4909]: E1128 17:26:20.556800 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:26:21 crc kubenswrapper[4909]: I1128 17:26:21.341985 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:26:21 crc kubenswrapper[4909]: E1128 17:26:21.342502 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:26:36 crc kubenswrapper[4909]: I1128 17:26:36.901279 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:26:36 crc kubenswrapper[4909]: E1128 17:26:36.902321 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:26:51 crc kubenswrapper[4909]: I1128 17:26:51.901494 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:26:51 crc kubenswrapper[4909]: E1128 17:26:51.902120 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:27:04 crc kubenswrapper[4909]: I1128 17:27:04.902339 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:27:04 crc kubenswrapper[4909]: E1128 17:27:04.903366 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.327700 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-rhqg8"] Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.335451 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-rhqg8"] Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.452821 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-l56sw"] Nov 28 17:27:15 crc kubenswrapper[4909]: E1128 17:27:15.453152 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="extract-utilities" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.453174 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="extract-utilities" Nov 28 17:27:15 crc kubenswrapper[4909]: E1128 17:27:15.453192 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="registry-server" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.453201 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="registry-server" Nov 28 17:27:15 crc kubenswrapper[4909]: E1128 17:27:15.453219 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="extract-content" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.453228 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="extract-content" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.453438 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c496ff00-1f8b-4677-832c-d3e8bee90e88" containerName="registry-server" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.454068 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.458240 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.458585 4909 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-29stw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.458771 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.458897 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.462538 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-l56sw"] Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.597692 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.597988 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfhxd\" (UniqueName: \"kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.598107 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.699917 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.700295 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.700435 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfhxd\" (UniqueName: \"kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.700718 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.701258 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.721004 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfhxd\" (UniqueName: \"kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd\") pod \"crc-storage-crc-l56sw\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.784437 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:15 crc kubenswrapper[4909]: I1128 17:27:15.920159 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3224e96-5227-4540-b352-e9a86d53e4fd" path="/var/lib/kubelet/pods/a3224e96-5227-4540-b352-e9a86d53e4fd/volumes" Nov 28 17:27:16 crc kubenswrapper[4909]: I1128 17:27:16.239114 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-l56sw"] Nov 28 17:27:16 crc kubenswrapper[4909]: I1128 17:27:16.915843 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-l56sw" event={"ID":"0832dc59-f0b5-41b7-a9f8-a18b84edd46f","Type":"ContainerStarted","Data":"188c185ebcdcc3af5fe0f94ae9b9d9ec6daf95557c6c9b34af91500a7e1b4e2d"} Nov 28 17:27:17 crc kubenswrapper[4909]: I1128 17:27:17.907354 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:27:17 crc kubenswrapper[4909]: E1128 17:27:17.907627 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:27:17 crc kubenswrapper[4909]: I1128 17:27:17.927455 4909 generic.go:334] "Generic (PLEG): container finished" podID="0832dc59-f0b5-41b7-a9f8-a18b84edd46f" containerID="b81c4e89fb7a96cf240b8887f943c18708cbdec63178b536b4b1d6925dfd1209" exitCode=0 Nov 28 17:27:17 crc kubenswrapper[4909]: I1128 17:27:17.927743 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-l56sw" event={"ID":"0832dc59-f0b5-41b7-a9f8-a18b84edd46f","Type":"ContainerDied","Data":"b81c4e89fb7a96cf240b8887f943c18708cbdec63178b536b4b1d6925dfd1209"} Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.257934 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.351936 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfhxd\" (UniqueName: \"kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd\") pod \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.352030 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage\") pod \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.352117 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt\") pod \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\" (UID: \"0832dc59-f0b5-41b7-a9f8-a18b84edd46f\") " Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.352356 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "0832dc59-f0b5-41b7-a9f8-a18b84edd46f" (UID: "0832dc59-f0b5-41b7-a9f8-a18b84edd46f"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.359948 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd" (OuterVolumeSpecName: "kube-api-access-wfhxd") pod "0832dc59-f0b5-41b7-a9f8-a18b84edd46f" (UID: "0832dc59-f0b5-41b7-a9f8-a18b84edd46f"). InnerVolumeSpecName "kube-api-access-wfhxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.374350 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "0832dc59-f0b5-41b7-a9f8-a18b84edd46f" (UID: "0832dc59-f0b5-41b7-a9f8-a18b84edd46f"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.454489 4909 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.454548 4909 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.454568 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfhxd\" (UniqueName: \"kubernetes.io/projected/0832dc59-f0b5-41b7-a9f8-a18b84edd46f-kube-api-access-wfhxd\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.941317 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-l56sw" event={"ID":"0832dc59-f0b5-41b7-a9f8-a18b84edd46f","Type":"ContainerDied","Data":"188c185ebcdcc3af5fe0f94ae9b9d9ec6daf95557c6c9b34af91500a7e1b4e2d"} Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.941351 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="188c185ebcdcc3af5fe0f94ae9b9d9ec6daf95557c6c9b34af91500a7e1b4e2d" Nov 28 17:27:19 crc kubenswrapper[4909]: I1128 17:27:19.941353 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-l56sw" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.588319 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-l56sw"] Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.594935 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-l56sw"] Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.727192 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-mbz8r"] Nov 28 17:27:21 crc kubenswrapper[4909]: E1128 17:27:21.727518 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0832dc59-f0b5-41b7-a9f8-a18b84edd46f" containerName="storage" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.727540 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0832dc59-f0b5-41b7-a9f8-a18b84edd46f" containerName="storage" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.727744 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0832dc59-f0b5-41b7-a9f8-a18b84edd46f" containerName="storage" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.728340 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.732152 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.732228 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.732721 4909 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-29stw" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.732921 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.745434 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mbz8r"] Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.889595 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.889750 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.889821 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wn8j\" (UniqueName: \"kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.911015 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0832dc59-f0b5-41b7-a9f8-a18b84edd46f" path="/var/lib/kubelet/pods/0832dc59-f0b5-41b7-a9f8-a18b84edd46f/volumes" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.990716 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.990799 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.990843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wn8j\" (UniqueName: \"kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.991303 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:21 crc kubenswrapper[4909]: I1128 17:27:21.991907 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:22 crc kubenswrapper[4909]: I1128 17:27:22.009459 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wn8j\" (UniqueName: \"kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j\") pod \"crc-storage-crc-mbz8r\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:22 crc kubenswrapper[4909]: I1128 17:27:22.048831 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:22 crc kubenswrapper[4909]: I1128 17:27:22.497321 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mbz8r"] Nov 28 17:27:22 crc kubenswrapper[4909]: I1128 17:27:22.964182 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mbz8r" event={"ID":"b693d911-e2df-4b3d-8bf1-a4362a4db06f","Type":"ContainerStarted","Data":"997658c47c57a5fe996be42b54e5b4e18f60f4627effe35e983ee9ef5d58b17d"} Nov 28 17:27:23 crc kubenswrapper[4909]: I1128 17:27:23.973778 4909 generic.go:334] "Generic (PLEG): container finished" podID="b693d911-e2df-4b3d-8bf1-a4362a4db06f" containerID="d774bb77940b7bd732914c5e431ec3eb75f789c02b89c56ea051cf9eaafe37d4" exitCode=0 Nov 28 17:27:23 crc kubenswrapper[4909]: I1128 17:27:23.973868 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mbz8r" event={"ID":"b693d911-e2df-4b3d-8bf1-a4362a4db06f","Type":"ContainerDied","Data":"d774bb77940b7bd732914c5e431ec3eb75f789c02b89c56ea051cf9eaafe37d4"} Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.348553 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mbz8r" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.449809 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt\") pod \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.449951 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wn8j\" (UniqueName: \"kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j\") pod \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.449958 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "b693d911-e2df-4b3d-8bf1-a4362a4db06f" (UID: "b693d911-e2df-4b3d-8bf1-a4362a4db06f"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.449980 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage\") pod \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\" (UID: \"b693d911-e2df-4b3d-8bf1-a4362a4db06f\") " Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.450511 4909 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b693d911-e2df-4b3d-8bf1-a4362a4db06f-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.457789 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j" (OuterVolumeSpecName: "kube-api-access-4wn8j") pod "b693d911-e2df-4b3d-8bf1-a4362a4db06f" (UID: "b693d911-e2df-4b3d-8bf1-a4362a4db06f"). InnerVolumeSpecName "kube-api-access-4wn8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.482915 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "b693d911-e2df-4b3d-8bf1-a4362a4db06f" (UID: "b693d911-e2df-4b3d-8bf1-a4362a4db06f"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.551954 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wn8j\" (UniqueName: \"kubernetes.io/projected/b693d911-e2df-4b3d-8bf1-a4362a4db06f-kube-api-access-4wn8j\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.551996 4909 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b693d911-e2df-4b3d-8bf1-a4362a4db06f-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.993790 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mbz8r" event={"ID":"b693d911-e2df-4b3d-8bf1-a4362a4db06f","Type":"ContainerDied","Data":"997658c47c57a5fe996be42b54e5b4e18f60f4627effe35e983ee9ef5d58b17d"} Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.994273 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="997658c47c57a5fe996be42b54e5b4e18f60f4627effe35e983ee9ef5d58b17d" Nov 28 17:27:25 crc kubenswrapper[4909]: I1128 17:27:25.993859 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mbz8r"
Nov 28 17:27:29 crc kubenswrapper[4909]: I1128 17:27:29.901203 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"
Nov 28 17:27:29 crc kubenswrapper[4909]: E1128 17:27:29.902965 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:27:31 crc kubenswrapper[4909]: I1128 17:27:31.033225 4909 scope.go:117] "RemoveContainer" containerID="ce7bf5c13a3546d4e9feaa6589935c33668c6d1380e6d9b3e3ccc6cce544ac80"
Nov 28 17:27:41 crc kubenswrapper[4909]: I1128 17:27:41.902278 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"
Nov 28 17:27:41 crc kubenswrapper[4909]: E1128 17:27:41.903757 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:27:53 crc kubenswrapper[4909]: I1128 17:27:53.902801 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"
Nov 28 17:27:53 crc kubenswrapper[4909]: E1128 17:27:53.903884 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:28:05 crc kubenswrapper[4909]: I1128 17:28:05.902210 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"
Nov 28 17:28:05 crc kubenswrapper[4909]: E1128 17:28:05.903469 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:28:19 crc kubenswrapper[4909]: I1128 17:28:19.901907 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2"
Nov 28 17:28:19 crc kubenswrapper[4909]: E1128 17:28:19.902695 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
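--- annotation (editor's note, not part of the captured log) ---
The identical "back-off 5m0s" errors above (17:26:21, 17:26:36, 17:26:51, 17:27:04, 17:27:17, 17:27:29, 17:27:41, 17:27:53, 17:28:05, 17:28:19, ...) are not fresh restarts: each pod sync re-evaluates the container, finds the CrashLoopBackOff window still open, and skips the StartContainer, so the same message repeats at roughly the sync cadence until the window expires. The 5m0s is the backoff ceiling. A sketch of doubling-with-cap backoff, assuming the commonly cited kubelet defaults of a 10s base and 5m maximum; an illustration of the policy, not kubelet source:

    package main

    import (
    	"fmt"
    	"time"
    )

    // crashLoopDelay doubles the wait after each restart and clamps it at
    // max. Base 10s and cap 5m are assumed defaults, not read from kubelet.
    func crashLoopDelay(restarts int, base, max time.Duration) time.Duration {
    	d := base
    	for i := 0; i < restarts; i++ {
    		d *= 2
    		if d >= max {
    			return max
    		}
    	}
    	return d
    }

    func main() {
    	for r := 0; r <= 6; r++ {
    		fmt.Printf("restart %d -> wait %v\n", r, crashLoopDelay(r, 10*time.Second, 5*time.Minute))
    	}
    	// Prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s: once the cap is
    	// reached, every sync that sees the failed container logs the
    	// "back-off 5m0s" message seen above.
    }

--- end annotation ---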
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.399549 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5mznf"] Nov 28 17:28:29 crc kubenswrapper[4909]: E1128 17:28:29.400709 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b693d911-e2df-4b3d-8bf1-a4362a4db06f" containerName="storage" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.400730 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b693d911-e2df-4b3d-8bf1-a4362a4db06f" containerName="storage" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.400976 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b693d911-e2df-4b3d-8bf1-a4362a4db06f" containerName="storage" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.402453 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.412845 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mznf"] Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.559206 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdtwg\" (UniqueName: \"kubernetes.io/projected/fcc8fe11-6ac1-4989-9cc2-ae822b108786-kube-api-access-rdtwg\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.559270 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-catalog-content\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.559304 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-utilities\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.661200 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdtwg\" (UniqueName: \"kubernetes.io/projected/fcc8fe11-6ac1-4989-9cc2-ae822b108786-kube-api-access-rdtwg\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.661248 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-catalog-content\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.661269 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-utilities\") pod 
\"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.661772 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-utilities\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.661815 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcc8fe11-6ac1-4989-9cc2-ae822b108786-catalog-content\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.693815 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdtwg\" (UniqueName: \"kubernetes.io/projected/fcc8fe11-6ac1-4989-9cc2-ae822b108786-kube-api-access-rdtwg\") pod \"certified-operators-5mznf\" (UID: \"fcc8fe11-6ac1-4989-9cc2-ae822b108786\") " pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:29 crc kubenswrapper[4909]: I1128 17:28:29.770388 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:30 crc kubenswrapper[4909]: I1128 17:28:30.272189 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mznf"] Nov 28 17:28:30 crc kubenswrapper[4909]: I1128 17:28:30.512075 4909 generic.go:334] "Generic (PLEG): container finished" podID="fcc8fe11-6ac1-4989-9cc2-ae822b108786" containerID="ba8f213b1041a0a2cc993c5d1aa94edbf449a5c9b5dc78f0fdbde19d4b3be1fa" exitCode=0 Nov 28 17:28:30 crc kubenswrapper[4909]: I1128 17:28:30.512429 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mznf" event={"ID":"fcc8fe11-6ac1-4989-9cc2-ae822b108786","Type":"ContainerDied","Data":"ba8f213b1041a0a2cc993c5d1aa94edbf449a5c9b5dc78f0fdbde19d4b3be1fa"} Nov 28 17:28:30 crc kubenswrapper[4909]: I1128 17:28:30.512461 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mznf" event={"ID":"fcc8fe11-6ac1-4989-9cc2-ae822b108786","Type":"ContainerStarted","Data":"be296b3cadb7d40e63ac2d6e02c3010653006a246590ad86043553298334e68c"} Nov 28 17:28:32 crc kubenswrapper[4909]: I1128 17:28:32.902224 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:28:32 crc kubenswrapper[4909]: E1128 17:28:32.902817 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:28:32 crc kubenswrapper[4909]: I1128 17:28:32.986932 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:32 crc kubenswrapper[4909]: I1128 17:28:32.989444 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.001546 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.124706 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmt85\" (UniqueName: \"kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.124898 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.125103 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.226060 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.226129 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmt85\" (UniqueName: \"kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.226176 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.226618 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.226867 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.248027 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xmt85\" (UniqueName: \"kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85\") pod \"redhat-operators-c4fcc\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:33 crc kubenswrapper[4909]: I1128 17:28:33.337496 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:35 crc kubenswrapper[4909]: I1128 17:28:35.211546 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:35 crc kubenswrapper[4909]: I1128 17:28:35.553099 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mznf" event={"ID":"fcc8fe11-6ac1-4989-9cc2-ae822b108786","Type":"ContainerStarted","Data":"293f2920eaaef448a794d77c05ed76db6911113c77de0fe267905ef0b5b33abd"} Nov 28 17:28:35 crc kubenswrapper[4909]: I1128 17:28:35.554939 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerStarted","Data":"9f32a90f58c72d25fceed6c92b6c5d7a98dbdd06e5c71e226dc6dcb54c724724"} Nov 28 17:28:36 crc kubenswrapper[4909]: I1128 17:28:36.566387 4909 generic.go:334] "Generic (PLEG): container finished" podID="fcc8fe11-6ac1-4989-9cc2-ae822b108786" containerID="293f2920eaaef448a794d77c05ed76db6911113c77de0fe267905ef0b5b33abd" exitCode=0 Nov 28 17:28:36 crc kubenswrapper[4909]: I1128 17:28:36.566483 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mznf" event={"ID":"fcc8fe11-6ac1-4989-9cc2-ae822b108786","Type":"ContainerDied","Data":"293f2920eaaef448a794d77c05ed76db6911113c77de0fe267905ef0b5b33abd"} Nov 28 17:28:36 crc kubenswrapper[4909]: I1128 17:28:36.568940 4909 generic.go:334] "Generic (PLEG): container finished" podID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerID="c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd" exitCode=0 Nov 28 17:28:36 crc kubenswrapper[4909]: I1128 17:28:36.568997 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerDied","Data":"c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd"} Nov 28 17:28:37 crc kubenswrapper[4909]: I1128 17:28:37.581992 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mznf" event={"ID":"fcc8fe11-6ac1-4989-9cc2-ae822b108786","Type":"ContainerStarted","Data":"6c8daa528a1f2e8673c667d0833bb8ec86decbc5082c8ef15febd8e4f7152765"} Nov 28 17:28:37 crc kubenswrapper[4909]: I1128 17:28:37.609648 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5mznf" podStartSLOduration=2.00949358 podStartE2EDuration="8.609633843s" podCreationTimestamp="2025-11-28 17:28:29 +0000 UTC" firstStartedPulling="2025-11-28 17:28:30.514099323 +0000 UTC m=+4692.910783837" lastFinishedPulling="2025-11-28 17:28:37.114239576 +0000 UTC m=+4699.510924100" observedRunningTime="2025-11-28 17:28:37.605482292 +0000 UTC m=+4700.002166816" watchObservedRunningTime="2025-11-28 17:28:37.609633843 +0000 UTC m=+4700.006318367" Nov 28 17:28:38 crc kubenswrapper[4909]: I1128 17:28:38.593827 4909 generic.go:334] "Generic (PLEG): container finished" podID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" 
containerID="2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733" exitCode=0 Nov 28 17:28:38 crc kubenswrapper[4909]: I1128 17:28:38.594205 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerDied","Data":"2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733"} Nov 28 17:28:39 crc kubenswrapper[4909]: I1128 17:28:39.604438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerStarted","Data":"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682"} Nov 28 17:28:39 crc kubenswrapper[4909]: I1128 17:28:39.627048 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c4fcc" podStartSLOduration=5.099984166 podStartE2EDuration="7.627030275s" podCreationTimestamp="2025-11-28 17:28:32 +0000 UTC" firstStartedPulling="2025-11-28 17:28:36.572857935 +0000 UTC m=+4698.969542459" lastFinishedPulling="2025-11-28 17:28:39.099904034 +0000 UTC m=+4701.496588568" observedRunningTime="2025-11-28 17:28:39.624024595 +0000 UTC m=+4702.020709119" watchObservedRunningTime="2025-11-28 17:28:39.627030275 +0000 UTC m=+4702.023714799" Nov 28 17:28:39 crc kubenswrapper[4909]: I1128 17:28:39.770918 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:39 crc kubenswrapper[4909]: I1128 17:28:39.770965 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:40 crc kubenswrapper[4909]: I1128 17:28:40.119223 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:43 crc kubenswrapper[4909]: I1128 17:28:43.338245 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:43 crc kubenswrapper[4909]: I1128 17:28:43.338650 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:44 crc kubenswrapper[4909]: I1128 17:28:44.410149 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c4fcc" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="registry-server" probeResult="failure" output=< Nov 28 17:28:44 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 17:28:44 crc kubenswrapper[4909]: > Nov 28 17:28:44 crc kubenswrapper[4909]: I1128 17:28:44.902276 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:28:44 crc kubenswrapper[4909]: E1128 17:28:44.902548 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:28:49 crc kubenswrapper[4909]: I1128 17:28:49.826863 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-5mznf" Nov 28 17:28:49 crc kubenswrapper[4909]: I1128 17:28:49.922131 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mznf"] Nov 28 17:28:49 crc kubenswrapper[4909]: I1128 17:28:49.975333 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"] Nov 28 17:28:49 crc kubenswrapper[4909]: I1128 17:28:49.975590 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jjgbb" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="registry-server" containerID="cri-o://9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547" gracePeriod=2 Nov 28 17:28:53 crc kubenswrapper[4909]: I1128 17:28:53.415000 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:53 crc kubenswrapper[4909]: I1128 17:28:53.491979 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.186923 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjgbb" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.237815 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities\") pod \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.237862 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content\") pod \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.237932 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kz8rc\" (UniqueName: \"kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc\") pod \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\" (UID: \"e2776b5d-d9ac-4133-a89c-fad6d411a7c1\") " Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.238375 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities" (OuterVolumeSpecName: "utilities") pod "e2776b5d-d9ac-4133-a89c-fad6d411a7c1" (UID: "e2776b5d-d9ac-4133-a89c-fad6d411a7c1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.245462 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc" (OuterVolumeSpecName: "kube-api-access-kz8rc") pod "e2776b5d-d9ac-4133-a89c-fad6d411a7c1" (UID: "e2776b5d-d9ac-4133-a89c-fad6d411a7c1"). InnerVolumeSpecName "kube-api-access-kz8rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.280786 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2776b5d-d9ac-4133-a89c-fad6d411a7c1" (UID: "e2776b5d-d9ac-4133-a89c-fad6d411a7c1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.339439 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.339464 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.339474 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kz8rc\" (UniqueName: \"kubernetes.io/projected/e2776b5d-d9ac-4133-a89c-fad6d411a7c1-kube-api-access-kz8rc\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.736470 4909 generic.go:334] "Generic (PLEG): container finished" podID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerID="9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547" exitCode=0 Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.736547 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerDied","Data":"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547"} Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.736594 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjgbb" event={"ID":"e2776b5d-d9ac-4133-a89c-fad6d411a7c1","Type":"ContainerDied","Data":"cb25205bb7dd8e8fc4cc402156bd74e519f06aa735d0de95a8bcafc7bda7ae59"} Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.736630 4909 scope.go:117] "RemoveContainer" containerID="9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.736881 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jjgbb" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.767730 4909 scope.go:117] "RemoveContainer" containerID="3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.774419 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"] Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.779637 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jjgbb"] Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.795142 4909 scope.go:117] "RemoveContainer" containerID="6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.836139 4909 scope.go:117] "RemoveContainer" containerID="9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547" Nov 28 17:28:54 crc kubenswrapper[4909]: E1128 17:28:54.837195 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547\": container with ID starting with 9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547 not found: ID does not exist" containerID="9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.837244 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547"} err="failed to get container status \"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547\": rpc error: code = NotFound desc = could not find container \"9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547\": container with ID starting with 9692c25c5d70baec3bdec26ec9bd3fa04e7cf68b2da1a6632112f94b2320d547 not found: ID does not exist" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.837278 4909 scope.go:117] "RemoveContainer" containerID="3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44" Nov 28 17:28:54 crc kubenswrapper[4909]: E1128 17:28:54.837641 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44\": container with ID starting with 3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44 not found: ID does not exist" containerID="3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.837674 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44"} err="failed to get container status \"3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44\": rpc error: code = NotFound desc = could not find container \"3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44\": container with ID starting with 3976d4aead058965985428936e5741c8841af3488f59d5cf55e40e37fd139b44 not found: ID does not exist" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.837690 4909 scope.go:117] "RemoveContainer" containerID="6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5" Nov 28 17:28:54 crc kubenswrapper[4909]: E1128 17:28:54.838085 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5\": container with ID starting with 6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5 not found: ID does not exist" containerID="6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5" Nov 28 17:28:54 crc kubenswrapper[4909]: I1128 17:28:54.838147 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5"} err="failed to get container status \"6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5\": rpc error: code = NotFound desc = could not find container \"6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5\": container with ID starting with 6c73cb074f4150e16644130a6c10e708c2310be209360ab3f0df6dd46ab6aad5 not found: ID does not exist" Nov 28 17:28:55 crc kubenswrapper[4909]: I1128 17:28:55.681603 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:55 crc kubenswrapper[4909]: I1128 17:28:55.682384 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c4fcc" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="registry-server" containerID="cri-o://9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682" gracePeriod=2 Nov 28 17:28:55 crc kubenswrapper[4909]: I1128 17:28:55.909741 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" path="/var/lib/kubelet/pods/e2776b5d-d9ac-4133-a89c-fad6d411a7c1/volumes" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.078922 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.166308 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities\") pod \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.166360 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content\") pod \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.166465 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmt85\" (UniqueName: \"kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85\") pod \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\" (UID: \"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef\") " Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.167704 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities" (OuterVolumeSpecName: "utilities") pod "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" (UID: "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.174817 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85" (OuterVolumeSpecName: "kube-api-access-xmt85") pod "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" (UID: "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef"). InnerVolumeSpecName "kube-api-access-xmt85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.268011 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.268046 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmt85\" (UniqueName: \"kubernetes.io/projected/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-kube-api-access-xmt85\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.277294 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" (UID: "da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.369603 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.754786 4909 generic.go:334] "Generic (PLEG): container finished" podID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerID="9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682" exitCode=0 Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.754828 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerDied","Data":"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682"} Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.754855 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4fcc" event={"ID":"da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef","Type":"ContainerDied","Data":"9f32a90f58c72d25fceed6c92b6c5d7a98dbdd06e5c71e226dc6dcb54c724724"} Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.754873 4909 scope.go:117] "RemoveContainer" containerID="9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.754906 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c4fcc" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.779859 4909 scope.go:117] "RemoveContainer" containerID="2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.791033 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.798564 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c4fcc"] Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.817650 4909 scope.go:117] "RemoveContainer" containerID="c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.835336 4909 scope.go:117] "RemoveContainer" containerID="9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682" Nov 28 17:28:56 crc kubenswrapper[4909]: E1128 17:28:56.835945 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682\": container with ID starting with 9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682 not found: ID does not exist" containerID="9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.835998 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682"} err="failed to get container status \"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682\": rpc error: code = NotFound desc = could not find container \"9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682\": container with ID starting with 9f5fac6210e703854644aa64c0798327b5163e216748a74077b9c2e4cd59c682 not found: ID does not exist" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.836029 4909 scope.go:117] "RemoveContainer" containerID="2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733" Nov 28 17:28:56 crc kubenswrapper[4909]: E1128 17:28:56.836436 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733\": container with ID starting with 2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733 not found: ID does not exist" containerID="2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.836467 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733"} err="failed to get container status \"2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733\": rpc error: code = NotFound desc = could not find container \"2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733\": container with ID starting with 2f31e87baa5c3e0a36e46d39873c9ae8d723ed6928987f1ba029f8c99171f733 not found: ID does not exist" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.836490 4909 scope.go:117] "RemoveContainer" containerID="c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd" Nov 28 17:28:56 crc kubenswrapper[4909]: E1128 17:28:56.836846 4909 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd\": container with ID starting with c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd not found: ID does not exist" containerID="c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd" Nov 28 17:28:56 crc kubenswrapper[4909]: I1128 17:28:56.836879 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd"} err="failed to get container status \"c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd\": rpc error: code = NotFound desc = could not find container \"c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd\": container with ID starting with c2b9348f270db254c98f8298fec50b7452a27e10a263686bd38051b7d74d7efd not found: ID does not exist" Nov 28 17:28:57 crc kubenswrapper[4909]: I1128 17:28:57.918221 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:28:57 crc kubenswrapper[4909]: E1128 17:28:57.919123 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:28:57 crc kubenswrapper[4909]: I1128 17:28:57.920281 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" path="/var/lib/kubelet/pods/da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef/volumes" Nov 28 17:29:09 crc kubenswrapper[4909]: I1128 17:29:09.902151 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:29:09 crc kubenswrapper[4909]: E1128 17:29:09.903006 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:29:24 crc kubenswrapper[4909]: I1128 17:29:24.903582 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:29:24 crc kubenswrapper[4909]: E1128 17:29:24.904586 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:29:37 crc kubenswrapper[4909]: I1128 17:29:37.909487 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:29:37 crc kubenswrapper[4909]: E1128 17:29:37.910442 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:29:51 crc kubenswrapper[4909]: I1128 17:29:51.902306 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:29:51 crc kubenswrapper[4909]: E1128 17:29:51.903447 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.185109 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm"] Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186399 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186425 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186458 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186473 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186500 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="extract-content" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186549 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="extract-content" Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186571 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="extract-utilities" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186585 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="extract-utilities" Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186599 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="extract-utilities" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186611 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="extract-utilities" Nov 28 17:30:00 crc kubenswrapper[4909]: E1128 17:30:00.186654 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="extract-content" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186667 4909 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="extract-content" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.186993 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2776b5d-d9ac-4133-a89c-fad6d411a7c1" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.187017 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="da1d3cd5-2f52-4dd3-b28d-4b9bbc8a05ef" containerName="registry-server" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.187873 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.194158 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm"] Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.200354 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.200742 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.255718 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjw2b\" (UniqueName: \"kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.256206 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.256229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.357057 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjw2b\" (UniqueName: \"kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.357100 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.357124 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.358102 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.367483 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.375330 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjw2b\" (UniqueName: \"kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b\") pod \"collect-profiles-29405850-8wfpm\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.528483 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:00 crc kubenswrapper[4909]: I1128 17:30:00.936819 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm"] Nov 28 17:30:00 crc kubenswrapper[4909]: W1128 17:30:00.945713 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84b48d81_ba40_4e31_82bd_652556a2eda9.slice/crio-7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1 WatchSource:0}: Error finding container 7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1: Status 404 returned error can't find the container with id 7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1 Nov 28 17:30:01 crc kubenswrapper[4909]: I1128 17:30:01.400475 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" event={"ID":"84b48d81-ba40-4e31-82bd-652556a2eda9","Type":"ContainerStarted","Data":"7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1"} Nov 28 17:30:02 crc kubenswrapper[4909]: I1128 17:30:02.409622 4909 generic.go:334] "Generic (PLEG): container finished" podID="84b48d81-ba40-4e31-82bd-652556a2eda9" containerID="a1b4b88660b3827533df934fcb4553b0a04397289404dd13451bdbb44e9167e3" exitCode=0 Nov 28 17:30:02 crc kubenswrapper[4909]: I1128 17:30:02.409930 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" event={"ID":"84b48d81-ba40-4e31-82bd-652556a2eda9","Type":"ContainerDied","Data":"a1b4b88660b3827533df934fcb4553b0a04397289404dd13451bdbb44e9167e3"} Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.756773 
4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.806377 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume\") pod \"84b48d81-ba40-4e31-82bd-652556a2eda9\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.806734 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume\") pod \"84b48d81-ba40-4e31-82bd-652556a2eda9\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.806895 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjw2b\" (UniqueName: \"kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b\") pod \"84b48d81-ba40-4e31-82bd-652556a2eda9\" (UID: \"84b48d81-ba40-4e31-82bd-652556a2eda9\") " Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.807451 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume" (OuterVolumeSpecName: "config-volume") pod "84b48d81-ba40-4e31-82bd-652556a2eda9" (UID: "84b48d81-ba40-4e31-82bd-652556a2eda9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.817446 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b" (OuterVolumeSpecName: "kube-api-access-cjw2b") pod "84b48d81-ba40-4e31-82bd-652556a2eda9" (UID: "84b48d81-ba40-4e31-82bd-652556a2eda9"). InnerVolumeSpecName "kube-api-access-cjw2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.817488 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "84b48d81-ba40-4e31-82bd-652556a2eda9" (UID: "84b48d81-ba40-4e31-82bd-652556a2eda9"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.908518 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjw2b\" (UniqueName: \"kubernetes.io/projected/84b48d81-ba40-4e31-82bd-652556a2eda9-kube-api-access-cjw2b\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.908555 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/84b48d81-ba40-4e31-82bd-652556a2eda9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4909]: I1128 17:30:03.908568 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/84b48d81-ba40-4e31-82bd-652556a2eda9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.425278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" event={"ID":"84b48d81-ba40-4e31-82bd-652556a2eda9","Type":"ContainerDied","Data":"7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1"} Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.425319 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm" Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.425335 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7011a26bdf1591e56ef13349d76f734fe9fd82f491ab195263a680abdea95ae1" Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.836450 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w"] Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.843569 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-44l8w"] Nov 28 17:30:04 crc kubenswrapper[4909]: I1128 17:30:04.901524 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:30:04 crc kubenswrapper[4909]: E1128 17:30:04.901884 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:30:05 crc kubenswrapper[4909]: I1128 17:30:05.918864 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54925a18-3574-4cf3-b36d-6f12d9c424fd" path="/var/lib/kubelet/pods/54925a18-3574-4cf3-b36d-6f12d9c424fd/volumes" Nov 28 17:30:17 crc kubenswrapper[4909]: I1128 17:30:17.907812 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:30:17 crc kubenswrapper[4909]: E1128 17:30:17.909001 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.186641 4909 scope.go:117] "RemoveContainer" containerID="a50f15cd57fdcfb7048afc764a560ed1849b7c7ceacb03a0d441ca42826aa9b2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.341139 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:31 crc kubenswrapper[4909]: E1128 17:30:31.341736 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b48d81-ba40-4e31-82bd-652556a2eda9" containerName="collect-profiles" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.341753 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b48d81-ba40-4e31-82bd-652556a2eda9" containerName="collect-profiles" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.341914 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b48d81-ba40-4e31-82bd-652556a2eda9" containerName="collect-profiles" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.342589 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.345304 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-hw4wd" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.345754 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.346226 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.346332 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.346351 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.355087 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.484402 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxcsq\" (UniqueName: \"kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.484447 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.484519 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.585679 4909 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.585762 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxcsq\" (UniqueName: \"kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.585791 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.586793 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.589167 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.606405 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"] Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.607512 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.611672 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxcsq\" (UniqueName: \"kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq\") pod \"dnsmasq-dns-5d7b5456f5-7p4s2\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.625279 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"] Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.701601 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.787455 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.787500 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.787523 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnb9q\" (UniqueName: \"kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.890036 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnb9q\" (UniqueName: \"kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.890452 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.890478 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.891309 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.891919 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc kubenswrapper[4909]: I1128 17:30:31.906879 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnb9q\" (UniqueName: \"kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q\") pod \"dnsmasq-dns-98ddfc8f-jrwn8\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:31 crc 
kubenswrapper[4909]: I1128 17:30:31.953324 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.139979 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.363421 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.518298 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.521046 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.525319 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.525546 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mm4j5" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.526321 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.526517 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.526675 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.531045 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.662057 4909 generic.go:334] "Generic (PLEG): container finished" podID="87be2a94-1f75-4cce-a6e0-45470034c031" containerID="500f2d7633d398859cf9f8b111e3902c3d1db4aa889da501eb495d3504ae114d" exitCode=0 Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.662122 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" event={"ID":"87be2a94-1f75-4cce-a6e0-45470034c031","Type":"ContainerDied","Data":"500f2d7633d398859cf9f8b111e3902c3d1db4aa889da501eb495d3504ae114d"} Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.662153 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" event={"ID":"87be2a94-1f75-4cce-a6e0-45470034c031","Type":"ContainerStarted","Data":"9c37eb8b0185e116349f58fd0e2ed6b0b047fb3ae0896385dedeadb8f5249a29"} Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.667489 4909 generic.go:334] "Generic (PLEG): container finished" podID="b6e49410-982d-4b07-a360-0f578902b345" containerID="c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868" exitCode=0 Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.667525 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" event={"ID":"b6e49410-982d-4b07-a360-0f578902b345","Type":"ContainerDied","Data":"c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868"} Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.667725 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" 
event={"ID":"b6e49410-982d-4b07-a360-0f578902b345","Type":"ContainerStarted","Data":"e33df10d941349714d0875df9275a35a1cbc8aee6f5ddf25076c6d9f0345ba01"} Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702681 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702726 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702795 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702817 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702870 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702897 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.702967 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmr5n\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.703025 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.794844 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.797112 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.804598 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.804851 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.804636 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kq8x4" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.804726 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.804764 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.807050 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808307 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808368 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808403 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808424 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808439 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808468 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808520 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmr5n\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808567 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.808592 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.809412 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.810337 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.810587 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.810881 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.814247 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.814289 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/420bea3f69233050d8b2a30bf1f459ae1e1c8c0dc4c060c8442a9659f3b18a42/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.816205 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.825692 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.826389 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.836566 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmr5n\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.878280 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.898172 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.902241 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:30:32 crc kubenswrapper[4909]: E1128 17:30:32.902415 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909714 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909760 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909838 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909882 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh8vh\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909920 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.909944 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.910016 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.910048 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:32 crc kubenswrapper[4909]: I1128 17:30:32.910079 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.011890 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.011940 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.011980 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh8vh\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012093 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012124 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012154 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" 
Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012178 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012227 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.012632 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.013840 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.015200 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.015350 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.016437 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.016481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.019595 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.019629 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9a23359a9eaa83372fc07504d4fae6acb84583f31ca0bdea1af3c6636983e22c/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.022999 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.030549 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh8vh\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.053336 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.124603 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 17:30:33 crc kubenswrapper[4909]: W1128 17:30:33.133581 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b265e3b_4df7_4dd9_8fbc_ef31254a10ed.slice/crio-90bef96dedd804eba6038258176f617b3a42257ea74f2aa972de77f757d3c87f WatchSource:0}: Error finding container 90bef96dedd804eba6038258176f617b3a42257ea74f2aa972de77f757d3c87f: Status 404 returned error can't find the container with id 90bef96dedd804eba6038258176f617b3a42257ea74f2aa972de77f757d3c87f Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.139461 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:30:33 crc kubenswrapper[4909]: W1128 17:30:33.589985 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a0e3c34_da37_4682_a5f0_7642d4bb5ed7.slice/crio-1b5e780b8cfc47d490a1f2c328d9e4f912c0fb37487a398e847aa1b7a9160c4b WatchSource:0}: Error finding container 1b5e780b8cfc47d490a1f2c328d9e4f912c0fb37487a398e847aa1b7a9160c4b: Status 404 returned error can't find the container with id 1b5e780b8cfc47d490a1f2c328d9e4f912c0fb37487a398e847aa1b7a9160c4b Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.597581 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.676689 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerStarted","Data":"1b5e780b8cfc47d490a1f2c328d9e4f912c0fb37487a398e847aa1b7a9160c4b"} Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.678339 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerStarted","Data":"90bef96dedd804eba6038258176f617b3a42257ea74f2aa972de77f757d3c87f"} Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.680631 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" event={"ID":"87be2a94-1f75-4cce-a6e0-45470034c031","Type":"ContainerStarted","Data":"6915a047b45fc996bc10e3428b2ad56babb7d94b91bf1e6b657cff9d23b94759"} Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.680837 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.682596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" event={"ID":"b6e49410-982d-4b07-a360-0f578902b345","Type":"ContainerStarted","Data":"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"} Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.683225 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.729283 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" podStartSLOduration=2.729263552 podStartE2EDuration="2.729263552s" podCreationTimestamp="2025-11-28 17:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:30:33.727931846 +0000 UTC m=+4816.124616380" watchObservedRunningTime="2025-11-28 17:30:33.729263552 +0000 UTC m=+4816.125948086" Nov 28 17:30:33 crc kubenswrapper[4909]: I1128 17:30:33.750307 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" podStartSLOduration=2.750287694 podStartE2EDuration="2.750287694s" podCreationTimestamp="2025-11-28 17:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:30:33.745493326 +0000 UTC m=+4816.142177870" watchObservedRunningTime="2025-11-28 17:30:33.750287694 +0000 UTC m=+4816.146972228" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 
17:30:34.097989 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.099682 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.103851 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-8fvg5" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.104379 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.104592 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.104814 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.111896 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.114783 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.233809 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-default\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.233869 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.233893 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk6dx\" (UniqueName: \"kubernetes.io/projected/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kube-api-access-hk6dx\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.233997 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.234029 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.234061 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-generated\") pod 
\"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.234078 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kolla-config\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.234100 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335544 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335606 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk6dx\" (UniqueName: \"kubernetes.io/projected/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kube-api-access-hk6dx\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335687 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335718 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335759 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335784 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kolla-config\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335812 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " 
pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.335868 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-default\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.336557 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.337451 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kolla-config\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.337644 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-config-data-default\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.337638 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e040cc04-ec87-429f-86b4-37fc9aa86fb1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.344862 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.344930 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/75c248ebff39541e13e3106274893655601ff38eb02e056172ce957a3ff04936/globalmount\"" pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.374158 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk6dx\" (UniqueName: \"kubernetes.io/projected/e040cc04-ec87-429f-86b4-37fc9aa86fb1-kube-api-access-hk6dx\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.374228 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.374469 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e040cc04-ec87-429f-86b4-37fc9aa86fb1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.385311 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.386543 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.388967 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.389185 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-d2fbb" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.398094 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.539780 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-config-data\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.539846 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lclbm\" (UniqueName: \"kubernetes.io/projected/8144b38e-f654-47a8-bac0-757cb44d606c-kube-api-access-lclbm\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.539902 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-kolla-config\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.641303 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-kolla-config\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.641650 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-config-data\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.641705 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lclbm\" (UniqueName: \"kubernetes.io/projected/8144b38e-f654-47a8-bac0-757cb44d606c-kube-api-access-lclbm\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.642175 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-kolla-config\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.642382 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8144b38e-f654-47a8-bac0-757cb44d606c-config-data\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.658336 4909 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-lclbm\" (UniqueName: \"kubernetes.io/projected/8144b38e-f654-47a8-bac0-757cb44d606c-kube-api-access-lclbm\") pod \"memcached-0\" (UID: \"8144b38e-f654-47a8-bac0-757cb44d606c\") " pod="openstack/memcached-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.691328 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerStarted","Data":"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"} Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.706263 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e6335dad-f336-4c48-bebb-88e5164ff2ce\") pod \"openstack-galera-0\" (UID: \"e040cc04-ec87-429f-86b4-37fc9aa86fb1\") " pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.739201 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 17:30:34 crc kubenswrapper[4909]: I1128 17:30:34.940611 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.226188 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 17:30:35 crc kubenswrapper[4909]: W1128 17:30:35.231712 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode040cc04_ec87_429f_86b4_37fc9aa86fb1.slice/crio-98246582ba057c1aa4eabf7519294a9dc59d1e6f42f11a2f85520fe373731f15 WatchSource:0}: Error finding container 98246582ba057c1aa4eabf7519294a9dc59d1e6f42f11a2f85520fe373731f15: Status 404 returned error can't find the container with id 98246582ba057c1aa4eabf7519294a9dc59d1e6f42f11a2f85520fe373731f15 Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.351376 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 17:30:35 crc kubenswrapper[4909]: W1128 17:30:35.354119 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8144b38e_f654_47a8_bac0_757cb44d606c.slice/crio-e85774f2048e69d6e06de60dbd40c86878d58168758fc1c9bd7547b77bb6a989 WatchSource:0}: Error finding container e85774f2048e69d6e06de60dbd40c86878d58168758fc1c9bd7547b77bb6a989: Status 404 returned error can't find the container with id e85774f2048e69d6e06de60dbd40c86878d58168758fc1c9bd7547b77bb6a989 Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.548303 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.549893 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.557070 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-ftflc" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.557144 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.558807 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.559059 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.559086 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.660393 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.660530 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.660883 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.660959 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfx6c\" (UniqueName: \"kubernetes.io/projected/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kube-api-access-cfx6c\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.660983 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.661072 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.661186 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.661256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.700562 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8144b38e-f654-47a8-bac0-757cb44d606c","Type":"ContainerStarted","Data":"93220c91f983878c7c5ca3bedde3cf55f34b8dc94d3f168bd8b1fa7ace4ea560"} Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.700624 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8144b38e-f654-47a8-bac0-757cb44d606c","Type":"ContainerStarted","Data":"e85774f2048e69d6e06de60dbd40c86878d58168758fc1c9bd7547b77bb6a989"} Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.700726 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.702920 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerStarted","Data":"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"} Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.705749 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e040cc04-ec87-429f-86b4-37fc9aa86fb1","Type":"ContainerStarted","Data":"45c7bb9f8c02e0b6f50f3c7e22bbc621d8a3fd8e4442adc14acb944b9556c56f"} Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.705803 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e040cc04-ec87-429f-86b4-37fc9aa86fb1","Type":"ContainerStarted","Data":"98246582ba057c1aa4eabf7519294a9dc59d1e6f42f11a2f85520fe373731f15"} Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.717780 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.717759754 podStartE2EDuration="1.717759754s" podCreationTimestamp="2025-11-28 17:30:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:30:35.717001454 +0000 UTC m=+4818.113685988" watchObservedRunningTime="2025-11-28 17:30:35.717759754 +0000 UTC m=+4818.114444298" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762758 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762828 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762853 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfx6c\" (UniqueName: \"kubernetes.io/projected/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kube-api-access-cfx6c\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762915 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.762985 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.763017 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.763103 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.764307 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.765057 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.765211 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-config-data-default\") pod 
\"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.765520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.769784 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.771278 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.782684 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.782739 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/34ab9f85cc19ba8ffea1f5ee83729a5d110a0c55d091d71d314cc17f812c5fdd/globalmount\"" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.790434 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfx6c\" (UniqueName: \"kubernetes.io/projected/fbee195d-c0c0-48d6-91d1-fc1d85e708b5-kube-api-access-cfx6c\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.810724 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cd14238-d536-438f-b4bc-4407b79c26d2\") pod \"openstack-cell1-galera-0\" (UID: \"fbee195d-c0c0-48d6-91d1-fc1d85e708b5\") " pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:35 crc kubenswrapper[4909]: I1128 17:30:35.868877 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:36 crc kubenswrapper[4909]: I1128 17:30:36.365487 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 17:30:36 crc kubenswrapper[4909]: W1128 17:30:36.369431 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfbee195d_c0c0_48d6_91d1_fc1d85e708b5.slice/crio-0b3f34f301f6914f5fc18bae80b0274cf5c8381ad272f2a315f3dabd7e322231 WatchSource:0}: Error finding container 0b3f34f301f6914f5fc18bae80b0274cf5c8381ad272f2a315f3dabd7e322231: Status 404 returned error can't find the container with id 0b3f34f301f6914f5fc18bae80b0274cf5c8381ad272f2a315f3dabd7e322231 Nov 28 17:30:36 crc kubenswrapper[4909]: I1128 17:30:36.714316 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fbee195d-c0c0-48d6-91d1-fc1d85e708b5","Type":"ContainerStarted","Data":"debcf3d88932eb99bfa15a2f35f9f5cd4397f52d1338512cdfa529f5f57bdb50"} Nov 28 17:30:36 crc kubenswrapper[4909]: I1128 17:30:36.714623 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fbee195d-c0c0-48d6-91d1-fc1d85e708b5","Type":"ContainerStarted","Data":"0b3f34f301f6914f5fc18bae80b0274cf5c8381ad272f2a315f3dabd7e322231"} Nov 28 17:30:40 crc kubenswrapper[4909]: I1128 17:30:40.759036 4909 generic.go:334] "Generic (PLEG): container finished" podID="e040cc04-ec87-429f-86b4-37fc9aa86fb1" containerID="45c7bb9f8c02e0b6f50f3c7e22bbc621d8a3fd8e4442adc14acb944b9556c56f" exitCode=0 Nov 28 17:30:40 crc kubenswrapper[4909]: I1128 17:30:40.759173 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e040cc04-ec87-429f-86b4-37fc9aa86fb1","Type":"ContainerDied","Data":"45c7bb9f8c02e0b6f50f3c7e22bbc621d8a3fd8e4442adc14acb944b9556c56f"} Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.703945 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.773297 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e040cc04-ec87-429f-86b4-37fc9aa86fb1","Type":"ContainerStarted","Data":"13fad228f0f1e5e763488c55d7bfc7ebc3956e2844b45ffd2b34379f7737910e"} Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.782000 4909 generic.go:334] "Generic (PLEG): container finished" podID="fbee195d-c0c0-48d6-91d1-fc1d85e708b5" containerID="debcf3d88932eb99bfa15a2f35f9f5cd4397f52d1338512cdfa529f5f57bdb50" exitCode=0 Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.782247 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fbee195d-c0c0-48d6-91d1-fc1d85e708b5","Type":"ContainerDied","Data":"debcf3d88932eb99bfa15a2f35f9f5cd4397f52d1338512cdfa529f5f57bdb50"} Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.814005 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.813977975 podStartE2EDuration="8.813977975s" podCreationTimestamp="2025-11-28 17:30:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:30:41.80891034 +0000 UTC m=+4824.205594894" watchObservedRunningTime="2025-11-28 17:30:41.813977975 +0000 UTC 
m=+4824.210662499" Nov 28 17:30:41 crc kubenswrapper[4909]: I1128 17:30:41.964236 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.011324 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.011524 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="dnsmasq-dns" containerID="cri-o://6915a047b45fc996bc10e3428b2ad56babb7d94b91bf1e6b657cff9d23b94759" gracePeriod=10 Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.791482 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fbee195d-c0c0-48d6-91d1-fc1d85e708b5","Type":"ContainerStarted","Data":"4ade9d05f2af027d02c517f512d637b508cbb9cda74042ab963d72a5fb742190"} Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.793902 4909 generic.go:334] "Generic (PLEG): container finished" podID="87be2a94-1f75-4cce-a6e0-45470034c031" containerID="6915a047b45fc996bc10e3428b2ad56babb7d94b91bf1e6b657cff9d23b94759" exitCode=0 Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.793926 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" event={"ID":"87be2a94-1f75-4cce-a6e0-45470034c031","Type":"ContainerDied","Data":"6915a047b45fc996bc10e3428b2ad56babb7d94b91bf1e6b657cff9d23b94759"} Nov 28 17:30:42 crc kubenswrapper[4909]: I1128 17:30:42.824868 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.824844 podStartE2EDuration="8.824844s" podCreationTimestamp="2025-11-28 17:30:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:30:42.815645644 +0000 UTC m=+4825.212330208" watchObservedRunningTime="2025-11-28 17:30:42.824844 +0000 UTC m=+4825.221528534" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.027244 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.181314 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config\") pod \"87be2a94-1f75-4cce-a6e0-45470034c031\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.181495 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxcsq\" (UniqueName: \"kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq\") pod \"87be2a94-1f75-4cce-a6e0-45470034c031\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.181551 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc\") pod \"87be2a94-1f75-4cce-a6e0-45470034c031\" (UID: \"87be2a94-1f75-4cce-a6e0-45470034c031\") " Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.187835 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq" (OuterVolumeSpecName: "kube-api-access-gxcsq") pod "87be2a94-1f75-4cce-a6e0-45470034c031" (UID: "87be2a94-1f75-4cce-a6e0-45470034c031"). InnerVolumeSpecName "kube-api-access-gxcsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.234713 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "87be2a94-1f75-4cce-a6e0-45470034c031" (UID: "87be2a94-1f75-4cce-a6e0-45470034c031"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.238666 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config" (OuterVolumeSpecName: "config") pod "87be2a94-1f75-4cce-a6e0-45470034c031" (UID: "87be2a94-1f75-4cce-a6e0-45470034c031"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.282738 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.282770 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxcsq\" (UniqueName: \"kubernetes.io/projected/87be2a94-1f75-4cce-a6e0-45470034c031-kube-api-access-gxcsq\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.282784 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87be2a94-1f75-4cce-a6e0-45470034c031-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.803046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" event={"ID":"87be2a94-1f75-4cce-a6e0-45470034c031","Type":"ContainerDied","Data":"9c37eb8b0185e116349f58fd0e2ed6b0b047fb3ae0896385dedeadb8f5249a29"} Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.803124 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-7p4s2" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.803411 4909 scope.go:117] "RemoveContainer" containerID="6915a047b45fc996bc10e3428b2ad56babb7d94b91bf1e6b657cff9d23b94759" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.822191 4909 scope.go:117] "RemoveContainer" containerID="500f2d7633d398859cf9f8b111e3902c3d1db4aa889da501eb495d3504ae114d" Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.838015 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.846524 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-7p4s2"] Nov 28 17:30:43 crc kubenswrapper[4909]: I1128 17:30:43.910460 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" path="/var/lib/kubelet/pods/87be2a94-1f75-4cce-a6e0-45470034c031/volumes" Nov 28 17:30:44 crc kubenswrapper[4909]: I1128 17:30:44.740241 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 17:30:44 crc kubenswrapper[4909]: I1128 17:30:44.740321 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 17:30:44 crc kubenswrapper[4909]: I1128 17:30:44.942762 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 17:30:45 crc kubenswrapper[4909]: I1128 17:30:45.869850 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:45 crc kubenswrapper[4909]: I1128 17:30:45.871315 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:46 crc kubenswrapper[4909]: I1128 17:30:46.952174 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 17:30:47 crc kubenswrapper[4909]: I1128 17:30:47.045770 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 17:30:47 crc kubenswrapper[4909]: I1128 17:30:47.911317 4909 
scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:30:47 crc kubenswrapper[4909]: E1128 17:30:47.912686 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:30:48 crc kubenswrapper[4909]: I1128 17:30:48.115426 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:48 crc kubenswrapper[4909]: I1128 17:30:48.207502 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 17:30:59 crc kubenswrapper[4909]: I1128 17:30:59.901780 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:30:59 crc kubenswrapper[4909]: E1128 17:30:59.903092 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:31:08 crc kubenswrapper[4909]: I1128 17:31:08.043082 4909 generic.go:334] "Generic (PLEG): container finished" podID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerID="3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f" exitCode=0 Nov 28 17:31:08 crc kubenswrapper[4909]: I1128 17:31:08.043209 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerDied","Data":"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"} Nov 28 17:31:08 crc kubenswrapper[4909]: I1128 17:31:08.045928 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerID="63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5" exitCode=0 Nov 28 17:31:08 crc kubenswrapper[4909]: I1128 17:31:08.045959 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerDied","Data":"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"} Nov 28 17:31:09 crc kubenswrapper[4909]: I1128 17:31:09.058628 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerStarted","Data":"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0"} Nov 28 17:31:09 crc kubenswrapper[4909]: I1128 17:31:09.059482 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:31:09 crc kubenswrapper[4909]: I1128 17:31:09.061335 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerStarted","Data":"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"} Nov 28 17:31:09 
crc kubenswrapper[4909]: I1128 17:31:09.061488 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 17:31:09 crc kubenswrapper[4909]: I1128 17:31:09.091666 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.091632955 podStartE2EDuration="38.091632955s" podCreationTimestamp="2025-11-28 17:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:31:09.087253498 +0000 UTC m=+4851.483938092" watchObservedRunningTime="2025-11-28 17:31:09.091632955 +0000 UTC m=+4851.488317479" Nov 28 17:31:09 crc kubenswrapper[4909]: I1128 17:31:09.124630 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.124610337 podStartE2EDuration="38.124610337s" podCreationTimestamp="2025-11-28 17:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:31:09.11910288 +0000 UTC m=+4851.515787394" watchObservedRunningTime="2025-11-28 17:31:09.124610337 +0000 UTC m=+4851.521294861" Nov 28 17:31:13 crc kubenswrapper[4909]: I1128 17:31:13.901204 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:31:13 crc kubenswrapper[4909]: E1128 17:31:13.902021 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:31:22 crc kubenswrapper[4909]: I1128 17:31:22.901961 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 17:31:23 crc kubenswrapper[4909]: I1128 17:31:23.143182 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:31:26 crc kubenswrapper[4909]: I1128 17:31:26.903382 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.213513 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0"} Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.971456 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:31:27 crc kubenswrapper[4909]: E1128 17:31:27.972130 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="init" Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.972148 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="init" Nov 28 17:31:27 crc kubenswrapper[4909]: E1128 17:31:27.972171 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="dnsmasq-dns" Nov 
28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.972179 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="dnsmasq-dns" Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.972402 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="87be2a94-1f75-4cce-a6e0-45470034c031" containerName="dnsmasq-dns" Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.973471 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:27 crc kubenswrapper[4909]: I1128 17:31:27.981174 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.023136 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9mgt\" (UniqueName: \"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.023199 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.023219 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.123965 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9mgt\" (UniqueName: \"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.124031 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.124050 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.124914 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.124947 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.143377 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9mgt\" (UniqueName: \"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt\") pod \"dnsmasq-dns-5b7946d7b9-sgzbw\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.289534 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.682931 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 17:31:28 crc kubenswrapper[4909]: I1128 17:31:28.766981 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:31:28 crc kubenswrapper[4909]: W1128 17:31:28.779556 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0749b84_dc57_428b_98b9_63e98b011b44.slice/crio-960650ad02156cd43943f316265095a807b54e848b1d356fb1f5630430598b1a WatchSource:0}: Error finding container 960650ad02156cd43943f316265095a807b54e848b1d356fb1f5630430598b1a: Status 404 returned error can't find the container with id 960650ad02156cd43943f316265095a807b54e848b1d356fb1f5630430598b1a Nov 28 17:31:29 crc kubenswrapper[4909]: I1128 17:31:29.237014 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" event={"ID":"e0749b84-dc57-428b-98b9-63e98b011b44","Type":"ContainerStarted","Data":"960650ad02156cd43943f316265095a807b54e848b1d356fb1f5630430598b1a"} Nov 28 17:31:29 crc kubenswrapper[4909]: I1128 17:31:29.355000 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 17:31:30 crc kubenswrapper[4909]: I1128 17:31:30.245954 4909 generic.go:334] "Generic (PLEG): container finished" podID="e0749b84-dc57-428b-98b9-63e98b011b44" containerID="e580068aab256004781aed23027abfda0e5933ac9e945ce83090afd45098b176" exitCode=0 Nov 28 17:31:30 crc kubenswrapper[4909]: I1128 17:31:30.246046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" event={"ID":"e0749b84-dc57-428b-98b9-63e98b011b44","Type":"ContainerDied","Data":"e580068aab256004781aed23027abfda0e5933ac9e945ce83090afd45098b176"} Nov 28 17:31:30 crc kubenswrapper[4909]: I1128 17:31:30.545334 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="rabbitmq" containerID="cri-o://98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858" gracePeriod=604799 Nov 28 17:31:31 crc kubenswrapper[4909]: I1128 17:31:31.115975 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="rabbitmq" containerID="cri-o://cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0" gracePeriod=604799 Nov 28 17:31:32 crc kubenswrapper[4909]: I1128 17:31:32.274159 4909 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" event={"ID":"e0749b84-dc57-428b-98b9-63e98b011b44","Type":"ContainerStarted","Data":"8d511b4993a3f10591543d0cc6acde31758dc2c084d44a92adbb26c541f7a846"} Nov 28 17:31:32 crc kubenswrapper[4909]: I1128 17:31:32.274475 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:32 crc kubenswrapper[4909]: I1128 17:31:32.309606 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" podStartSLOduration=5.309577 podStartE2EDuration="5.309577s" podCreationTimestamp="2025-11-28 17:31:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:31:32.300188809 +0000 UTC m=+4874.696873393" watchObservedRunningTime="2025-11-28 17:31:32.309577 +0000 UTC m=+4874.706261564" Nov 28 17:31:32 crc kubenswrapper[4909]: I1128 17:31:32.898851 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.240:5672: connect: connection refused" Nov 28 17:31:33 crc kubenswrapper[4909]: I1128 17:31:33.139998 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.241:5672: connect: connection refused" Nov 28 17:31:38 crc kubenswrapper[4909]: I1128 17:31:38.291941 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:31:38 crc kubenswrapper[4909]: I1128 17:31:38.382076 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"] Nov 28 17:31:38 crc kubenswrapper[4909]: I1128 17:31:38.382478 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="dnsmasq-dns" containerID="cri-o://c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac" gracePeriod=10 Nov 28 17:31:38 crc kubenswrapper[4909]: I1128 17:31:38.973519 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.135439 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc\") pod \"b6e49410-982d-4b07-a360-0f578902b345\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.135483 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config\") pod \"b6e49410-982d-4b07-a360-0f578902b345\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.135536 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnb9q\" (UniqueName: \"kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q\") pod \"b6e49410-982d-4b07-a360-0f578902b345\" (UID: \"b6e49410-982d-4b07-a360-0f578902b345\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.141329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q" (OuterVolumeSpecName: "kube-api-access-wnb9q") pod "b6e49410-982d-4b07-a360-0f578902b345" (UID: "b6e49410-982d-4b07-a360-0f578902b345"). InnerVolumeSpecName "kube-api-access-wnb9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.176207 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config" (OuterVolumeSpecName: "config") pod "b6e49410-982d-4b07-a360-0f578902b345" (UID: "b6e49410-982d-4b07-a360-0f578902b345"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.181138 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b6e49410-982d-4b07-a360-0f578902b345" (UID: "b6e49410-982d-4b07-a360-0f578902b345"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.201761 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.208320 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.239583 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.239612 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e49410-982d-4b07-a360-0f578902b345-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.239622 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnb9q\" (UniqueName: \"kubernetes.io/projected/b6e49410-982d-4b07-a360-0f578902b345-kube-api-access-wnb9q\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.335375 4909 generic.go:334] "Generic (PLEG): container finished" podID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerID="cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0" exitCode=0 Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.335440 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerDied","Data":"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.335466 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7","Type":"ContainerDied","Data":"1b5e780b8cfc47d490a1f2c328d9e4f912c0fb37487a398e847aa1b7a9160c4b"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.335482 4909 scope.go:117] "RemoveContainer" containerID="cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.335591 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.340397 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerID="98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858" exitCode=0 Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.340459 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerDied","Data":"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.340487 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed","Type":"ContainerDied","Data":"90bef96dedd804eba6038258176f617b3a42257ea74f2aa972de77f757d3c87f"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.340545 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343462 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343498 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343518 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343546 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343576 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343597 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343614 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmr5n\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343772 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343791 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343814 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gh8vh\" (UniqueName: 
\"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343839 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343911 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343947 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.343976 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.344020 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf\") pod \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\" (UID: \"5b265e3b-4df7-4dd9-8fbc-ef31254a10ed\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.344055 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.344078 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.344115 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins\") pod \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\" (UID: \"0a0e3c34-da37-4682-a5f0-7642d4bb5ed7\") " Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.345084 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.345107 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.345905 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.347332 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.347334 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh" (OuterVolumeSpecName: "kube-api-access-gh8vh") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "kube-api-access-gh8vh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.347581 4909 generic.go:334] "Generic (PLEG): container finished" podID="b6e49410-982d-4b07-a360-0f578902b345" containerID="c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac" exitCode=0 Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.347622 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" event={"ID":"b6e49410-982d-4b07-a360-0f578902b345","Type":"ContainerDied","Data":"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.347647 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" event={"ID":"b6e49410-982d-4b07-a360-0f578902b345","Type":"ContainerDied","Data":"e33df10d941349714d0875df9275a35a1cbc8aee6f5ddf25076c6d9f0345ba01"} Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.348238 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-jrwn8" Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.350429 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info" (OuterVolumeSpecName: "pod-info") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.365037 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a" (OuterVolumeSpecName: "persistence") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.368249 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf" (OuterVolumeSpecName: "server-conf") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.371140 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.371257 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.371298 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.371957 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c" (OuterVolumeSpecName: "persistence") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "pvc-3dc6036d-8245-45fb-a849-b6e685de461c". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.372220 4909 scope.go:117] "RemoveContainer" containerID="3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.375320 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf" (OuterVolumeSpecName: "server-conf") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.381942 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.382036 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info" (OuterVolumeSpecName: "pod-info") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.391026 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.392449 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n" (OuterVolumeSpecName: "kube-api-access-gmr5n") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "kube-api-access-gmr5n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.397011 4909 scope.go:117] "RemoveContainer" containerID="cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.397411 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0\": container with ID starting with cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0 not found: ID does not exist" containerID="cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.397460 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0"} err="failed to get container status \"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0\": rpc error: code = NotFound desc = could not find container \"cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0\": container with ID starting with cfa5ef15b98ef390af0d1163b9456f434e22025113a4f4bc0b0d975c8804abf0 not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.397495 4909 scope.go:117] "RemoveContainer" containerID="3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.398025 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f\": container with ID starting with 3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f not found: ID does not exist" containerID="3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.398062 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f"} err="failed to get container status \"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f\": rpc error: code = NotFound desc = could not find container \"3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f\": container with ID starting with 3c97aa2dbbd2baeb90bb0a94b372133f2567f3fb8d6bf13915232e6a3fc3582f not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.398088 4909 scope.go:117] "RemoveContainer" containerID="98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.398494 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-jrwn8"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.415073 4909 scope.go:117] "RemoveContainer" containerID="63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.437755 4909 scope.go:117] "RemoveContainer" containerID="98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.438279 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858\": container with ID starting with 98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858 not found: ID does not exist" containerID="98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.438318 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858"} err="failed to get container status \"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858\": rpc error: code = NotFound desc = could not find container \"98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858\": container with ID starting with 98e3e42cd81eaef7c5c87e27eb8e1765df564992c1f631f5a7b224a2e265d858 not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.438350 4909 scope.go:117] "RemoveContainer" containerID="63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.438868 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5\": container with ID starting with 63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5 not found: ID does not exist" containerID="63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.438909 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5"} err="failed to get container status \"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5\": rpc error: code = NotFound desc = could not find container \"63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5\": container with ID starting with 63e2c6aa8aaca7f37c6168b7c74a823e8c78031d83f85092a1117231d5da83f5 not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.438937 4909 scope.go:117] "RemoveContainer" containerID="c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445422 4909 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-server-conf\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445448 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmr5n\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-kube-api-access-gmr5n\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445479 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") on node \"crc\" "
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445494 4909 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445509 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gh8vh\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-kube-api-access-gh8vh\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445522 4909 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445541 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") on node \"crc\" "
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445554 4909 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-server-conf\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445568 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445581 4909 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445592 4909 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-pod-info\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445647 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445690 4909 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-pod-info\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445702 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445714 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.445726 4909 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.451955 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" (UID: "5b265e3b-4df7-4dd9-8fbc-ef31254a10ed"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.453729 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" (UID: "0a0e3c34-da37-4682-a5f0-7642d4bb5ed7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.461348 4909 scope.go:117] "RemoveContainer" containerID="c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.465504 4909 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.465681 4909 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a") on node "crc"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.469752 4909 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.469891 4909 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-3dc6036d-8245-45fb-a849-b6e685de461c" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c") on node "crc"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.486389 4909 scope.go:117] "RemoveContainer" containerID="c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.489145 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac\": container with ID starting with c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac not found: ID does not exist" containerID="c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.489199 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac"} err="failed to get container status \"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac\": rpc error: code = NotFound desc = could not find container \"c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac\": container with ID starting with c1012b6c58da5f3c69c8808ffc16f79bb18c4fe38b7665e207cedfd40a47b5ac not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.489232 4909 scope.go:117] "RemoveContainer" containerID="c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.489705 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868\": container with ID starting with c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868 not found: ID does not exist" containerID="c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.489747 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868"} err="failed to get container status \"c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868\": rpc error: code = NotFound desc = could not find container \"c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868\": container with ID starting with c5f2dd3ee9c3e1d7a95d948c5cd1987e05cf780c64bad6804863139457711868 not found: ID does not exist"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.546751 4909 reconciler_common.go:293] "Volume detached for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.546782 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.546792 4909 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.546802 4909 reconciler_common.go:293] "Volume detached for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") on node \"crc\" DevicePath \"\""
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.673624 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.688665 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.734724 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.749721 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.773775 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774113 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="init"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774129 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="init"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774141 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="dnsmasq-dns"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774147 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="dnsmasq-dns"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774156 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774162 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774174 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="setup-container"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774180 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="setup-container"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774195 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774200 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: E1128 17:31:39.774210 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="setup-container"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774216 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="setup-container"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774370 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6e49410-982d-4b07-a360-0f578902b345" containerName="dnsmasq-dns"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774384 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.774401 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" containerName="rabbitmq"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.775164 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.780034 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.780449 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.782825 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.783041 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kq8x4"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.784069 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.789470 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.796184 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.809324 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.809576 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mm4j5"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.809750 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.809882 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.811052 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.817932 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.839284 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.910855 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0e3c34-da37-4682-a5f0-7642d4bb5ed7" path="/var/lib/kubelet/pods/0a0e3c34-da37-4682-a5f0-7642d4bb5ed7/volumes"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.911783 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b265e3b-4df7-4dd9-8fbc-ef31254a10ed" path="/var/lib/kubelet/pods/5b265e3b-4df7-4dd9-8fbc-ef31254a10ed/volumes"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.912878 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6e49410-982d-4b07-a360-0f578902b345" path="/var/lib/kubelet/pods/b6e49410-982d-4b07-a360-0f578902b345/volumes"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956339 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956384 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956408 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956429 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/01d2e8aa-55af-4fa5-98f4-c176b2701770-pod-info\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956452 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cefad2-7466-4c21-a977-bde45a4a0346-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956592 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/01d2e8aa-55af-4fa5-98f4-c176b2701770-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956643 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956721 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956753 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956789 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956835 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956857 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956893 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-server-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956938 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.956979 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj5rw\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-kube-api-access-hj5rw\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.957021 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.957062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cefad2-7466-4c21-a977-bde45a4a0346-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:39 crc kubenswrapper[4909]: I1128 17:31:39.957086 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmx8z\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-kube-api-access-jmx8z\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058544 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058587 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058613 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/01d2e8aa-55af-4fa5-98f4-c176b2701770-pod-info\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058641 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cefad2-7466-4c21-a977-bde45a4a0346-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058685 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/01d2e8aa-55af-4fa5-98f4-c176b2701770-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058719 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058735 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058755 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058773 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058794 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058818 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-server-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058841 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058921 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj5rw\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-kube-api-access-hj5rw\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058946 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058967 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cefad2-7466-4c21-a977-bde45a4a0346-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.058986 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmx8z\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-kube-api-access-jmx8z\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.059015 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.059521 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.059935 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.060026 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.060049 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.060197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.061200 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-server-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.061829 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c5cefad2-7466-4c21-a977-bde45a4a0346-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.062305 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.062331 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9a23359a9eaa83372fc07504d4fae6acb84583f31ca0bdea1af3c6636983e22c/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.063573 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c5cefad2-7466-4c21-a977-bde45a4a0346-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.064821 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.064913 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/420bea3f69233050d8b2a30bf1f459ae1e1c8c0dc4c060c8442a9659f3b18a42/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.066546 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.066691 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/01d2e8aa-55af-4fa5-98f4-c176b2701770-pod-info\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.068320 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/01d2e8aa-55af-4fa5-98f4-c176b2701770-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.070435 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c5cefad2-7466-4c21-a977-bde45a4a0346-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.071763 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/01d2e8aa-55af-4fa5-98f4-c176b2701770-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.078162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj5rw\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-kube-api-access-hj5rw\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.083584 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/01d2e8aa-55af-4fa5-98f4-c176b2701770-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.092452 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmx8z\" (UniqueName: \"kubernetes.io/projected/c5cefad2-7466-4c21-a977-bde45a4a0346-kube-api-access-jmx8z\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.100991 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ed0ec5af-12d3-4de7-97ee-2b423a5dca9a\") pod \"rabbitmq-server-0\" (UID: \"01d2e8aa-55af-4fa5-98f4-c176b2701770\") " pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.100996 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3dc6036d-8245-45fb-a849-b6e685de461c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3dc6036d-8245-45fb-a849-b6e685de461c\") pod \"rabbitmq-cell1-server-0\" (UID: \"c5cefad2-7466-4c21-a977-bde45a4a0346\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.116516 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.152345 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.614454 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 17:31:40 crc kubenswrapper[4909]: I1128 17:31:40.662003 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 17:31:41 crc kubenswrapper[4909]: W1128 17:31:41.084239 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5cefad2_7466_4c21_a977_bde45a4a0346.slice/crio-529524c321f186cb436325ad124665a147f24eb6f6bb4713d331ad5f82b06a36 WatchSource:0}: Error finding container 529524c321f186cb436325ad124665a147f24eb6f6bb4713d331ad5f82b06a36: Status 404 returned error can't find the container with id 529524c321f186cb436325ad124665a147f24eb6f6bb4713d331ad5f82b06a36
Nov 28 17:31:41 crc kubenswrapper[4909]: I1128 17:31:41.369127 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c5cefad2-7466-4c21-a977-bde45a4a0346","Type":"ContainerStarted","Data":"529524c321f186cb436325ad124665a147f24eb6f6bb4713d331ad5f82b06a36"}
Nov 28 17:31:41 crc kubenswrapper[4909]: I1128 17:31:41.371097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"01d2e8aa-55af-4fa5-98f4-c176b2701770","Type":"ContainerStarted","Data":"0dbac11d684c13a4c0b3cfe894fc60845b6ca20580e10e035ffb13d4c040726f"}
Nov 28 17:31:43 crc kubenswrapper[4909]: I1128 17:31:43.400165 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c5cefad2-7466-4c21-a977-bde45a4a0346","Type":"ContainerStarted","Data":"8c38315bc7de6a4d0297937d1f2c30b69585f18f938026642aceefa29342ea01"}
Nov 28 17:31:43 crc kubenswrapper[4909]: I1128 17:31:43.402972 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"01d2e8aa-55af-4fa5-98f4-c176b2701770","Type":"ContainerStarted","Data":"9034825af5e05b615199af043a9e2b06724c7466a9f58a21c309aaa330188239"}
Nov 28 17:32:15 crc kubenswrapper[4909]: I1128 17:32:15.707159 4909 generic.go:334] "Generic (PLEG): container finished" podID="01d2e8aa-55af-4fa5-98f4-c176b2701770" containerID="9034825af5e05b615199af043a9e2b06724c7466a9f58a21c309aaa330188239" exitCode=0
Nov 28 17:32:15 crc kubenswrapper[4909]: I1128 17:32:15.707245 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"01d2e8aa-55af-4fa5-98f4-c176b2701770","Type":"ContainerDied","Data":"9034825af5e05b615199af043a9e2b06724c7466a9f58a21c309aaa330188239"}
Nov 28 17:32:15 crc kubenswrapper[4909]: I1128 17:32:15.710050 4909 generic.go:334] "Generic (PLEG): container finished" podID="c5cefad2-7466-4c21-a977-bde45a4a0346" containerID="8c38315bc7de6a4d0297937d1f2c30b69585f18f938026642aceefa29342ea01" exitCode=0
Nov 28 17:32:15 crc kubenswrapper[4909]: I1128 17:32:15.710084 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c5cefad2-7466-4c21-a977-bde45a4a0346","Type":"ContainerDied","Data":"8c38315bc7de6a4d0297937d1f2c30b69585f18f938026642aceefa29342ea01"}
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.718073 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"01d2e8aa-55af-4fa5-98f4-c176b2701770","Type":"ContainerStarted","Data":"034cb507d123659eb42933041e1b521966935d80b1fd66980abdf6abba2df816"}
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.719359 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.721273 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c5cefad2-7466-4c21-a977-bde45a4a0346","Type":"ContainerStarted","Data":"e83ab05d334fc5608d33fbc8b3020b1a770d562f8da44ed501c6746ff5f5ce18"}
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.721802 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.771886 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.77186473 podStartE2EDuration="37.77186473s" podCreationTimestamp="2025-11-28 17:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:32:16.769352373 +0000 UTC m=+4919.166036897" watchObservedRunningTime="2025-11-28 17:32:16.77186473 +0000 UTC m=+4919.168549254"
Nov 28 17:32:16 crc kubenswrapper[4909]: I1128 17:32:16.774848 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.774841169 podStartE2EDuration="37.774841169s" podCreationTimestamp="2025-11-28 17:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:32:16.745223318 +0000 UTC m=+4919.141907842" watchObservedRunningTime="2025-11-28 17:32:16.774841169 +0000 UTC m=+4919.171525693"
Nov 28 17:32:30 crc kubenswrapper[4909]: I1128 17:32:30.120904 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 17:32:30 crc kubenswrapper[4909]: I1128 17:32:30.156857 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 28 17:32:31 crc kubenswrapper[4909]: I1128 17:32:31.273188 4909 scope.go:117] "RemoveContainer" containerID="9cd42997643f623c9fdafe52f2bd598646215c006d26b937d5f5ec70cfb4ee2b"
Nov 28 17:32:31 crc kubenswrapper[4909]: I1128 17:32:31.305019 4909 scope.go:117] "RemoveContainer" containerID="20e747fadd24e8c905d56d6456922b7dbc2c101bcb8ba14944e93dbb5b6f789c"
Nov 28 17:32:31 crc kubenswrapper[4909]: I1128 17:32:31.361578 4909 scope.go:117] "RemoveContainer" containerID="270a97c8332fe9b21beedf6521c7cbfe56a27c1602c390f6062160dcb377522e"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.304419 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.305608 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.307842 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-j2qb5"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.317134 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.421506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwhpg\" (UniqueName: \"kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg\") pod \"mariadb-client-1-default\" (UID: \"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e\") " pod="openstack/mariadb-client-1-default"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.523094 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwhpg\" (UniqueName: \"kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg\") pod \"mariadb-client-1-default\" (UID: \"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e\") " pod="openstack/mariadb-client-1-default"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.542418 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwhpg\" (UniqueName: \"kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg\") pod \"mariadb-client-1-default\" (UID: \"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e\") " pod="openstack/mariadb-client-1-default"
Nov 28 17:32:40 crc kubenswrapper[4909]: I1128 17:32:40.641396 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 17:32:41 crc kubenswrapper[4909]: I1128 17:32:41.160105 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 17:32:41 crc kubenswrapper[4909]: I1128 17:32:41.942518 4909 generic.go:334] "Generic (PLEG): container finished" podID="c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" containerID="a4741f252e511b98f41b0ee057af9b9f021f7441be8ed5851d20a479a8fc2640" exitCode=0
Nov 28 17:32:41 crc kubenswrapper[4909]: I1128 17:32:41.942635 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e","Type":"ContainerDied","Data":"a4741f252e511b98f41b0ee057af9b9f021f7441be8ed5851d20a479a8fc2640"}
Nov 28 17:32:41 crc kubenswrapper[4909]: I1128 17:32:41.942986 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e","Type":"ContainerStarted","Data":"33317ced55508467b4f16b540d52754bcebd6862e315dd84d287dc3e0106cf66"}
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.370716 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.395948 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e/mariadb-client-1-default/0.log"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.420949 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.431913 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.469204 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwhpg\" (UniqueName: \"kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg\") pod \"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e\" (UID: \"c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e\") "
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.475468 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg" (OuterVolumeSpecName: "kube-api-access-mwhpg") pod "c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" (UID: "c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e"). InnerVolumeSpecName "kube-api-access-mwhpg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.572004 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwhpg\" (UniqueName: \"kubernetes.io/projected/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e-kube-api-access-mwhpg\") on node \"crc\" DevicePath \"\""
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.912717 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" path="/var/lib/kubelet/pods/c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e/volumes"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.959698 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"]
Nov 28 17:32:43 crc kubenswrapper[4909]: E1128 17:32:43.960092 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" containerName="mariadb-client-1-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.960111 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" containerName="mariadb-client-1-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.960348 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5133b8e-7a85-4f2e-b5cc-96367eb3ac4e" containerName="mariadb-client-1-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.961017 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.961712 4909 scope.go:117] "RemoveContainer" containerID="a4741f252e511b98f41b0ee057af9b9f021f7441be8ed5851d20a479a8fc2640"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.961781 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 17:32:43 crc kubenswrapper[4909]: I1128 17:32:43.971623 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.084874 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w748h\" (UniqueName: \"kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h\") pod \"mariadb-client-2-default\" (UID: \"c86138ae-2da6-4ae4-967d-905dfb8ea3d9\") " pod="openstack/mariadb-client-2-default"
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.186422 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w748h\" (UniqueName: \"kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h\") pod \"mariadb-client-2-default\" (UID: \"c86138ae-2da6-4ae4-967d-905dfb8ea3d9\") " pod="openstack/mariadb-client-2-default"
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.204025 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w748h\" (UniqueName: \"kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h\") pod \"mariadb-client-2-default\" (UID: \"c86138ae-2da6-4ae4-967d-905dfb8ea3d9\") " pod="openstack/mariadb-client-2-default"
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.318831 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.865850 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Nov 28 17:32:44 crc kubenswrapper[4909]: I1128 17:32:44.975715 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c86138ae-2da6-4ae4-967d-905dfb8ea3d9","Type":"ContainerStarted","Data":"1b2d61faed510b1d9150e1eea4d33e582d13a785112955294076eed58b83c0b5"}
Nov 28 17:32:45 crc kubenswrapper[4909]: I1128 17:32:45.985802 4909 generic.go:334] "Generic (PLEG): container finished" podID="c86138ae-2da6-4ae4-967d-905dfb8ea3d9" containerID="972df3d9e33c4598769f1f692c2802377abca94f4c85273fc2bdb66bdc081974" exitCode=1
Nov 28 17:32:45 crc kubenswrapper[4909]: I1128 17:32:45.985854 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c86138ae-2da6-4ae4-967d-905dfb8ea3d9","Type":"ContainerDied","Data":"972df3d9e33c4598769f1f692c2802377abca94f4c85273fc2bdb66bdc081974"}
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.326063 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.354719 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_c86138ae-2da6-4ae4-967d-905dfb8ea3d9/mariadb-client-2-default/0.log"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.378279 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"]
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.385319 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"]
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.434812 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w748h\" (UniqueName: \"kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h\") pod \"c86138ae-2da6-4ae4-967d-905dfb8ea3d9\" (UID: \"c86138ae-2da6-4ae4-967d-905dfb8ea3d9\") "
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.440471 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h" (OuterVolumeSpecName: "kube-api-access-w748h") pod "c86138ae-2da6-4ae4-967d-905dfb8ea3d9" (UID: "c86138ae-2da6-4ae4-967d-905dfb8ea3d9"). InnerVolumeSpecName "kube-api-access-w748h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.537438 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w748h\" (UniqueName: \"kubernetes.io/projected/c86138ae-2da6-4ae4-967d-905dfb8ea3d9-kube-api-access-w748h\") on node \"crc\" DevicePath \"\""
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.899157 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"]
Nov 28 17:32:47 crc kubenswrapper[4909]: E1128 17:32:47.899602 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c86138ae-2da6-4ae4-967d-905dfb8ea3d9" containerName="mariadb-client-2-default"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.899630 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c86138ae-2da6-4ae4-967d-905dfb8ea3d9" containerName="mariadb-client-2-default"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.899926 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c86138ae-2da6-4ae4-967d-905dfb8ea3d9" containerName="mariadb-client-2-default"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.901104 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.913088 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c86138ae-2da6-4ae4-967d-905dfb8ea3d9" path="/var/lib/kubelet/pods/c86138ae-2da6-4ae4-967d-905dfb8ea3d9/volumes"
Nov 28 17:32:47 crc kubenswrapper[4909]: I1128 17:32:47.914614 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.002463 4909 scope.go:117] "RemoveContainer" containerID="972df3d9e33c4598769f1f692c2802377abca94f4c85273fc2bdb66bdc081974"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.002883 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.043824 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqlhw\" (UniqueName: \"kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw\") pod \"mariadb-client-1\" (UID: \"b21ddf60-2874-4c09-ab43-ef1bc4113000\") " pod="openstack/mariadb-client-1"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.146081 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqlhw\" (UniqueName: \"kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw\") pod \"mariadb-client-1\" (UID: \"b21ddf60-2874-4c09-ab43-ef1bc4113000\") " pod="openstack/mariadb-client-1"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.181428 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqlhw\" (UniqueName: \"kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw\") pod \"mariadb-client-1\" (UID: \"b21ddf60-2874-4c09-ab43-ef1bc4113000\") " pod="openstack/mariadb-client-1"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.246942 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Nov 28 17:32:48 crc kubenswrapper[4909]: I1128 17:32:48.736952 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Nov 28 17:32:48 crc kubenswrapper[4909]: W1128 17:32:48.746749 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb21ddf60_2874_4c09_ab43_ef1bc4113000.slice/crio-c49efb59058eeb14c3a7c605b9bc059c9017e86c3e1adccf1c99c3b66679ebfa WatchSource:0}: Error finding container c49efb59058eeb14c3a7c605b9bc059c9017e86c3e1adccf1c99c3b66679ebfa: Status 404 returned error can't find the container with id c49efb59058eeb14c3a7c605b9bc059c9017e86c3e1adccf1c99c3b66679ebfa
Nov 28 17:32:49 crc kubenswrapper[4909]: I1128 17:32:49.010192 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"b21ddf60-2874-4c09-ab43-ef1bc4113000","Type":"ContainerStarted","Data":"c49efb59058eeb14c3a7c605b9bc059c9017e86c3e1adccf1c99c3b66679ebfa"}
Nov 28 17:32:53 crc kubenswrapper[4909]: I1128 17:32:53.047262 4909 generic.go:334] "Generic (PLEG): container finished" podID="b21ddf60-2874-4c09-ab43-ef1bc4113000" containerID="020bd65f619f7a7af9bc14dd491d2ead894c168643875fc7bda136597a78127c" exitCode=0
Nov 28 17:32:53 crc kubenswrapper[4909]: I1128 17:32:53.047393 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"b21ddf60-2874-4c09-ab43-ef1bc4113000","Type":"ContainerDied","Data":"020bd65f619f7a7af9bc14dd491d2ead894c168643875fc7bda136597a78127c"}
Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.581255 4909 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.604257 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_b21ddf60-2874-4c09-ab43-ef1bc4113000/mariadb-client-1/0.log" Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.638005 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.647951 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.750284 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqlhw\" (UniqueName: \"kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw\") pod \"b21ddf60-2874-4c09-ab43-ef1bc4113000\" (UID: \"b21ddf60-2874-4c09-ab43-ef1bc4113000\") " Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.755241 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw" (OuterVolumeSpecName: "kube-api-access-xqlhw") pod "b21ddf60-2874-4c09-ab43-ef1bc4113000" (UID: "b21ddf60-2874-4c09-ab43-ef1bc4113000"). InnerVolumeSpecName "kube-api-access-xqlhw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:32:54 crc kubenswrapper[4909]: I1128 17:32:54.852109 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqlhw\" (UniqueName: \"kubernetes.io/projected/b21ddf60-2874-4c09-ab43-ef1bc4113000-kube-api-access-xqlhw\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.075574 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c49efb59058eeb14c3a7c605b9bc059c9017e86c3e1adccf1c99c3b66679ebfa" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.075642 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.151317 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 17:32:55 crc kubenswrapper[4909]: E1128 17:32:55.152055 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21ddf60-2874-4c09-ab43-ef1bc4113000" containerName="mariadb-client-1" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.152098 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b21ddf60-2874-4c09-ab43-ef1bc4113000" containerName="mariadb-client-1" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.152482 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b21ddf60-2874-4c09-ab43-ef1bc4113000" containerName="mariadb-client-1" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.153639 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.156476 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-j2qb5" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.163691 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.259345 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w\") pod \"mariadb-client-4-default\" (UID: \"ce0b0a64-967e-4af9-b79c-fcbc14b17a23\") " pod="openstack/mariadb-client-4-default" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.361579 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w\") pod \"mariadb-client-4-default\" (UID: \"ce0b0a64-967e-4af9-b79c-fcbc14b17a23\") " pod="openstack/mariadb-client-4-default" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.381934 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w\") pod \"mariadb-client-4-default\" (UID: \"ce0b0a64-967e-4af9-b79c-fcbc14b17a23\") " pod="openstack/mariadb-client-4-default" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.480536 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 17:32:55 crc kubenswrapper[4909]: I1128 17:32:55.913917 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b21ddf60-2874-4c09-ab43-ef1bc4113000" path="/var/lib/kubelet/pods/b21ddf60-2874-4c09-ab43-ef1bc4113000/volumes" Nov 28 17:32:56 crc kubenswrapper[4909]: I1128 17:32:56.012772 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 17:32:56 crc kubenswrapper[4909]: W1128 17:32:56.589098 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce0b0a64_967e_4af9_b79c_fcbc14b17a23.slice/crio-b7684a45072a473ba49bbf30ce32393c61017f648314d75670b8b802d5bad9a4 WatchSource:0}: Error finding container b7684a45072a473ba49bbf30ce32393c61017f648314d75670b8b802d5bad9a4: Status 404 returned error can't find the container with id b7684a45072a473ba49bbf30ce32393c61017f648314d75670b8b802d5bad9a4 Nov 28 17:32:57 crc kubenswrapper[4909]: I1128 17:32:57.092726 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"ce0b0a64-967e-4af9-b79c-fcbc14b17a23","Type":"ContainerStarted","Data":"b7684a45072a473ba49bbf30ce32393c61017f648314d75670b8b802d5bad9a4"} Nov 28 17:32:58 crc kubenswrapper[4909]: I1128 17:32:58.106118 4909 generic.go:334] "Generic (PLEG): container finished" podID="ce0b0a64-967e-4af9-b79c-fcbc14b17a23" containerID="53c6d3fc5f9c1e744c22a1a02409dda5019aad9167e3de4a181a08f2de9843cb" exitCode=0 Nov 28 17:32:58 crc kubenswrapper[4909]: I1128 17:32:58.106240 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" 
event={"ID":"ce0b0a64-967e-4af9-b79c-fcbc14b17a23","Type":"ContainerDied","Data":"53c6d3fc5f9c1e744c22a1a02409dda5019aad9167e3de4a181a08f2de9843cb"} Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.479168 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.499018 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_ce0b0a64-967e-4af9-b79c-fcbc14b17a23/mariadb-client-4-default/0.log" Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.526987 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.533009 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.629554 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w\") pod \"ce0b0a64-967e-4af9-b79c-fcbc14b17a23\" (UID: \"ce0b0a64-967e-4af9-b79c-fcbc14b17a23\") " Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.634544 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w" (OuterVolumeSpecName: "kube-api-access-dc96w") pod "ce0b0a64-967e-4af9-b79c-fcbc14b17a23" (UID: "ce0b0a64-967e-4af9-b79c-fcbc14b17a23"). InnerVolumeSpecName "kube-api-access-dc96w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.731195 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/ce0b0a64-967e-4af9-b79c-fcbc14b17a23-kube-api-access-dc96w\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:59 crc kubenswrapper[4909]: I1128 17:32:59.953310 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce0b0a64-967e-4af9-b79c-fcbc14b17a23" path="/var/lib/kubelet/pods/ce0b0a64-967e-4af9-b79c-fcbc14b17a23/volumes" Nov 28 17:33:00 crc kubenswrapper[4909]: I1128 17:33:00.124827 4909 scope.go:117] "RemoveContainer" containerID="53c6d3fc5f9c1e744c22a1a02409dda5019aad9167e3de4a181a08f2de9843cb" Nov 28 17:33:00 crc kubenswrapper[4909]: I1128 17:33:00.124899 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.049987 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 17:33:03 crc kubenswrapper[4909]: E1128 17:33:03.051551 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce0b0a64-967e-4af9-b79c-fcbc14b17a23" containerName="mariadb-client-4-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.051674 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce0b0a64-967e-4af9-b79c-fcbc14b17a23" containerName="mariadb-client-4-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.051950 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce0b0a64-967e-4af9-b79c-fcbc14b17a23" containerName="mariadb-client-4-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.052620 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.055148 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-j2qb5" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.068624 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.103197 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59njd\" (UniqueName: \"kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd\") pod \"mariadb-client-5-default\" (UID: \"b55d30f8-8f62-4dcd-91a8-d0bda20e4113\") " pod="openstack/mariadb-client-5-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.204463 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59njd\" (UniqueName: \"kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd\") pod \"mariadb-client-5-default\" (UID: \"b55d30f8-8f62-4dcd-91a8-d0bda20e4113\") " pod="openstack/mariadb-client-5-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.227599 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59njd\" (UniqueName: \"kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd\") pod \"mariadb-client-5-default\" (UID: \"b55d30f8-8f62-4dcd-91a8-d0bda20e4113\") " pod="openstack/mariadb-client-5-default" Nov 28 17:33:03 crc kubenswrapper[4909]: I1128 17:33:03.372249 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 17:33:04 crc kubenswrapper[4909]: I1128 17:33:04.704497 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 17:33:05 crc kubenswrapper[4909]: I1128 17:33:05.175235 4909 generic.go:334] "Generic (PLEG): container finished" podID="b55d30f8-8f62-4dcd-91a8-d0bda20e4113" containerID="9c2c726a1de00c20f5b3320df66b7cedefafc47829a8303ca229255aadde65cc" exitCode=0 Nov 28 17:33:05 crc kubenswrapper[4909]: I1128 17:33:05.175415 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"b55d30f8-8f62-4dcd-91a8-d0bda20e4113","Type":"ContainerDied","Data":"9c2c726a1de00c20f5b3320df66b7cedefafc47829a8303ca229255aadde65cc"} Nov 28 17:33:05 crc kubenswrapper[4909]: I1128 17:33:05.175641 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"b55d30f8-8f62-4dcd-91a8-d0bda20e4113","Type":"ContainerStarted","Data":"dd03006b7c0ecdab448407855cffc71968f87b18fcdcb4115cd0aa3c90f8588f"} Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.662961 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.668686 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59njd\" (UniqueName: \"kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd\") pod \"b55d30f8-8f62-4dcd-91a8-d0bda20e4113\" (UID: \"b55d30f8-8f62-4dcd-91a8-d0bda20e4113\") " Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.673931 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd" (OuterVolumeSpecName: "kube-api-access-59njd") pod "b55d30f8-8f62-4dcd-91a8-d0bda20e4113" (UID: "b55d30f8-8f62-4dcd-91a8-d0bda20e4113"). InnerVolumeSpecName "kube-api-access-59njd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.682745 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_b55d30f8-8f62-4dcd-91a8-d0bda20e4113/mariadb-client-5-default/0.log" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.707052 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.712848 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.770095 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59njd\" (UniqueName: \"kubernetes.io/projected/b55d30f8-8f62-4dcd-91a8-d0bda20e4113-kube-api-access-59njd\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.868569 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 17:33:06 crc kubenswrapper[4909]: E1128 17:33:06.869114 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b55d30f8-8f62-4dcd-91a8-d0bda20e4113" containerName="mariadb-client-5-default" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.869137 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b55d30f8-8f62-4dcd-91a8-d0bda20e4113" containerName="mariadb-client-5-default" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.869328 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b55d30f8-8f62-4dcd-91a8-d0bda20e4113" containerName="mariadb-client-5-default" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.870068 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.873804 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 17:33:06 crc kubenswrapper[4909]: I1128 17:33:06.972491 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpgbt\" (UniqueName: \"kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt\") pod \"mariadb-client-6-default\" (UID: \"fef8b272-2e5c-4932-9d8e-10d5dee4c443\") " pod="openstack/mariadb-client-6-default" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.073756 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpgbt\" (UniqueName: \"kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt\") pod \"mariadb-client-6-default\" (UID: \"fef8b272-2e5c-4932-9d8e-10d5dee4c443\") " pod="openstack/mariadb-client-6-default" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.097768 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpgbt\" (UniqueName: \"kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt\") pod \"mariadb-client-6-default\" (UID: \"fef8b272-2e5c-4932-9d8e-10d5dee4c443\") " pod="openstack/mariadb-client-6-default" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.189883 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd03006b7c0ecdab448407855cffc71968f87b18fcdcb4115cd0aa3c90f8588f" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.189917 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.193846 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.761245 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 17:33:07 crc kubenswrapper[4909]: I1128 17:33:07.920625 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b55d30f8-8f62-4dcd-91a8-d0bda20e4113" path="/var/lib/kubelet/pods/b55d30f8-8f62-4dcd-91a8-d0bda20e4113/volumes" Nov 28 17:33:07 crc kubenswrapper[4909]: W1128 17:33:07.983084 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfef8b272_2e5c_4932_9d8e_10d5dee4c443.slice/crio-5b97633b6fe8ed385295fa1a850a1d0ad548bafd116371ab45bbdef8807a1a17 WatchSource:0}: Error finding container 5b97633b6fe8ed385295fa1a850a1d0ad548bafd116371ab45bbdef8807a1a17: Status 404 returned error can't find the container with id 5b97633b6fe8ed385295fa1a850a1d0ad548bafd116371ab45bbdef8807a1a17 Nov 28 17:33:08 crc kubenswrapper[4909]: I1128 17:33:08.201414 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fef8b272-2e5c-4932-9d8e-10d5dee4c443","Type":"ContainerStarted","Data":"d3b50455012716c9385a8d6351e92cc47f666891ab77ed6d7e6d6b53effce566"} Nov 28 17:33:08 crc kubenswrapper[4909]: I1128 17:33:08.201464 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fef8b272-2e5c-4932-9d8e-10d5dee4c443","Type":"ContainerStarted","Data":"5b97633b6fe8ed385295fa1a850a1d0ad548bafd116371ab45bbdef8807a1a17"} Nov 28 17:33:08 crc kubenswrapper[4909]: I1128 17:33:08.224306 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=2.22428691 podStartE2EDuration="2.22428691s" podCreationTimestamp="2025-11-28 17:33:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:33:08.217283532 +0000 UTC m=+4970.613968096" watchObservedRunningTime="2025-11-28 17:33:08.22428691 +0000 UTC m=+4970.620971434" Nov 28 17:33:09 crc kubenswrapper[4909]: I1128 17:33:09.214893 4909 generic.go:334] "Generic (PLEG): container finished" podID="fef8b272-2e5c-4932-9d8e-10d5dee4c443" containerID="d3b50455012716c9385a8d6351e92cc47f666891ab77ed6d7e6d6b53effce566" exitCode=1 Nov 28 17:33:09 crc kubenswrapper[4909]: I1128 17:33:09.215326 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fef8b272-2e5c-4932-9d8e-10d5dee4c443","Type":"ContainerDied","Data":"d3b50455012716c9385a8d6351e92cc47f666891ab77ed6d7e6d6b53effce566"} Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.616568 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.634254 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpgbt\" (UniqueName: \"kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt\") pod \"fef8b272-2e5c-4932-9d8e-10d5dee4c443\" (UID: \"fef8b272-2e5c-4932-9d8e-10d5dee4c443\") " Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.639707 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt" (OuterVolumeSpecName: "kube-api-access-jpgbt") pod "fef8b272-2e5c-4932-9d8e-10d5dee4c443" (UID: "fef8b272-2e5c-4932-9d8e-10d5dee4c443"). InnerVolumeSpecName "kube-api-access-jpgbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.669004 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.674446 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.737033 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpgbt\" (UniqueName: \"kubernetes.io/projected/fef8b272-2e5c-4932-9d8e-10d5dee4c443-kube-api-access-jpgbt\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.773999 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 17:33:10 crc kubenswrapper[4909]: E1128 17:33:10.774305 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fef8b272-2e5c-4932-9d8e-10d5dee4c443" containerName="mariadb-client-6-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.774331 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fef8b272-2e5c-4932-9d8e-10d5dee4c443" containerName="mariadb-client-6-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.774516 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fef8b272-2e5c-4932-9d8e-10d5dee4c443" containerName="mariadb-client-6-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.775015 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.789553 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.838269 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nztpn\" (UniqueName: \"kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn\") pod \"mariadb-client-7-default\" (UID: \"b6ab2a42-c6a8-41e0-b76a-5f146d18d368\") " pod="openstack/mariadb-client-7-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.939607 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nztpn\" (UniqueName: \"kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn\") pod \"mariadb-client-7-default\" (UID: \"b6ab2a42-c6a8-41e0-b76a-5f146d18d368\") " pod="openstack/mariadb-client-7-default" Nov 28 17:33:10 crc kubenswrapper[4909]: I1128 17:33:10.962952 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nztpn\" (UniqueName: \"kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn\") pod \"mariadb-client-7-default\" (UID: \"b6ab2a42-c6a8-41e0-b76a-5f146d18d368\") " pod="openstack/mariadb-client-7-default" Nov 28 17:33:11 crc kubenswrapper[4909]: I1128 17:33:11.102061 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 17:33:11 crc kubenswrapper[4909]: I1128 17:33:11.230016 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b97633b6fe8ed385295fa1a850a1d0ad548bafd116371ab45bbdef8807a1a17" Nov 28 17:33:11 crc kubenswrapper[4909]: I1128 17:33:11.230082 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 17:33:11 crc kubenswrapper[4909]: I1128 17:33:11.586510 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 17:33:11 crc kubenswrapper[4909]: W1128 17:33:11.590288 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6ab2a42_c6a8_41e0_b76a_5f146d18d368.slice/crio-1dc923f278275836e9d037b6390548830d30533db0d54ee654aca2f7a266daa4 WatchSource:0}: Error finding container 1dc923f278275836e9d037b6390548830d30533db0d54ee654aca2f7a266daa4: Status 404 returned error can't find the container with id 1dc923f278275836e9d037b6390548830d30533db0d54ee654aca2f7a266daa4 Nov 28 17:33:11 crc kubenswrapper[4909]: I1128 17:33:11.912725 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fef8b272-2e5c-4932-9d8e-10d5dee4c443" path="/var/lib/kubelet/pods/fef8b272-2e5c-4932-9d8e-10d5dee4c443/volumes" Nov 28 17:33:12 crc kubenswrapper[4909]: I1128 17:33:12.239051 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"b6ab2a42-c6a8-41e0-b76a-5f146d18d368","Type":"ContainerStarted","Data":"1dc923f278275836e9d037b6390548830d30533db0d54ee654aca2f7a266daa4"} Nov 28 17:33:13 crc kubenswrapper[4909]: I1128 17:33:13.247282 4909 generic.go:334] "Generic (PLEG): container finished" podID="b6ab2a42-c6a8-41e0-b76a-5f146d18d368" containerID="903fba95ed66be364f6b4ee879aaf96bb6febe6761606ebeb02be07ae1f4cc3a" exitCode=0 Nov 28 17:33:13 crc kubenswrapper[4909]: I1128 17:33:13.247321 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"b6ab2a42-c6a8-41e0-b76a-5f146d18d368","Type":"ContainerDied","Data":"903fba95ed66be364f6b4ee879aaf96bb6febe6761606ebeb02be07ae1f4cc3a"} Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.623660 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.639222 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_b6ab2a42-c6a8-41e0-b76a-5f146d18d368/mariadb-client-7-default/0.log" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.702018 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.704753 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nztpn\" (UniqueName: \"kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn\") pod \"b6ab2a42-c6a8-41e0-b76a-5f146d18d368\" (UID: \"b6ab2a42-c6a8-41e0-b76a-5f146d18d368\") " Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.710821 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.712096 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn" (OuterVolumeSpecName: "kube-api-access-nztpn") pod "b6ab2a42-c6a8-41e0-b76a-5f146d18d368" (UID: "b6ab2a42-c6a8-41e0-b76a-5f146d18d368"). InnerVolumeSpecName "kube-api-access-nztpn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.807249 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nztpn\" (UniqueName: \"kubernetes.io/projected/b6ab2a42-c6a8-41e0-b76a-5f146d18d368-kube-api-access-nztpn\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.835841 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 28 17:33:14 crc kubenswrapper[4909]: E1128 17:33:14.836220 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6ab2a42-c6a8-41e0-b76a-5f146d18d368" containerName="mariadb-client-7-default" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.836243 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6ab2a42-c6a8-41e0-b76a-5f146d18d368" containerName="mariadb-client-7-default" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.836421 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6ab2a42-c6a8-41e0-b76a-5f146d18d368" containerName="mariadb-client-7-default" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.837009 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.843661 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 17:33:14 crc kubenswrapper[4909]: I1128 17:33:14.908932 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dfvl\" (UniqueName: \"kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl\") pod \"mariadb-client-2\" (UID: \"76a79b95-66aa-4e9c-b7dc-be5567975be4\") " pod="openstack/mariadb-client-2" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.010183 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dfvl\" (UniqueName: \"kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl\") pod \"mariadb-client-2\" (UID: \"76a79b95-66aa-4e9c-b7dc-be5567975be4\") " pod="openstack/mariadb-client-2" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.024790 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dfvl\" (UniqueName: \"kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl\") pod \"mariadb-client-2\" (UID: \"76a79b95-66aa-4e9c-b7dc-be5567975be4\") " pod="openstack/mariadb-client-2" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.155970 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.305311 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dc923f278275836e9d037b6390548830d30533db0d54ee654aca2f7a266daa4" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.305546 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.552943 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 17:33:15 crc kubenswrapper[4909]: W1128 17:33:15.564141 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76a79b95_66aa_4e9c_b7dc_be5567975be4.slice/crio-a90d082bcbc34df6aad8f03b4a764f58bf6ff56bb7f101319a7b3e1cbadd6e0a WatchSource:0}: Error finding container a90d082bcbc34df6aad8f03b4a764f58bf6ff56bb7f101319a7b3e1cbadd6e0a: Status 404 returned error can't find the container with id a90d082bcbc34df6aad8f03b4a764f58bf6ff56bb7f101319a7b3e1cbadd6e0a Nov 28 17:33:15 crc kubenswrapper[4909]: I1128 17:33:15.913548 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6ab2a42-c6a8-41e0-b76a-5f146d18d368" path="/var/lib/kubelet/pods/b6ab2a42-c6a8-41e0-b76a-5f146d18d368/volumes" Nov 28 17:33:16 crc kubenswrapper[4909]: I1128 17:33:16.314552 4909 generic.go:334] "Generic (PLEG): container finished" podID="76a79b95-66aa-4e9c-b7dc-be5567975be4" containerID="5c747d8718232c04852ea32179dc4691fbdab8ad7fa4871325b3f02cdac25b81" exitCode=0 Nov 28 17:33:16 crc kubenswrapper[4909]: I1128 17:33:16.314630 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"76a79b95-66aa-4e9c-b7dc-be5567975be4","Type":"ContainerDied","Data":"5c747d8718232c04852ea32179dc4691fbdab8ad7fa4871325b3f02cdac25b81"} Nov 28 17:33:16 crc kubenswrapper[4909]: I1128 17:33:16.314901 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"76a79b95-66aa-4e9c-b7dc-be5567975be4","Type":"ContainerStarted","Data":"a90d082bcbc34df6aad8f03b4a764f58bf6ff56bb7f101319a7b3e1cbadd6e0a"} Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.699725 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.717735 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_76a79b95-66aa-4e9c-b7dc-be5567975be4/mariadb-client-2/0.log" Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.740169 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.745365 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.760546 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dfvl\" (UniqueName: \"kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl\") pod \"76a79b95-66aa-4e9c-b7dc-be5567975be4\" (UID: \"76a79b95-66aa-4e9c-b7dc-be5567975be4\") " Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.769182 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl" (OuterVolumeSpecName: "kube-api-access-2dfvl") pod "76a79b95-66aa-4e9c-b7dc-be5567975be4" (UID: "76a79b95-66aa-4e9c-b7dc-be5567975be4"). InnerVolumeSpecName "kube-api-access-2dfvl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.862229 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dfvl\" (UniqueName: \"kubernetes.io/projected/76a79b95-66aa-4e9c-b7dc-be5567975be4-kube-api-access-2dfvl\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:17 crc kubenswrapper[4909]: I1128 17:33:17.915382 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76a79b95-66aa-4e9c-b7dc-be5567975be4" path="/var/lib/kubelet/pods/76a79b95-66aa-4e9c-b7dc-be5567975be4/volumes" Nov 28 17:33:18 crc kubenswrapper[4909]: I1128 17:33:18.334750 4909 scope.go:117] "RemoveContainer" containerID="5c747d8718232c04852ea32179dc4691fbdab8ad7fa4871325b3f02cdac25b81" Nov 28 17:33:18 crc kubenswrapper[4909]: I1128 17:33:18.334790 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 17:33:31 crc kubenswrapper[4909]: I1128 17:33:31.458428 4909 scope.go:117] "RemoveContainer" containerID="b81c4e89fb7a96cf240b8887f943c18708cbdec63178b536b4b1d6925dfd1209" Nov 28 17:33:49 crc kubenswrapper[4909]: I1128 17:33:49.910708 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:33:49 crc kubenswrapper[4909]: I1128 17:33:49.911412 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:34:19 crc kubenswrapper[4909]: I1128 17:34:19.910841 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:34:19 crc kubenswrapper[4909]: I1128 17:34:19.911889 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:34:49 crc kubenswrapper[4909]: I1128 17:34:49.911490 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:34:49 crc kubenswrapper[4909]: I1128 17:34:49.912384 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:34:49 crc kubenswrapper[4909]: I1128 17:34:49.920543 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:34:49 crc kubenswrapper[4909]: I1128 17:34:49.921305 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:34:49 crc kubenswrapper[4909]: I1128 17:34:49.921391 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0" gracePeriod=600 Nov 28 17:34:50 crc kubenswrapper[4909]: I1128 17:34:50.220202 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0" exitCode=0 Nov 28 17:34:50 crc kubenswrapper[4909]: I1128 17:34:50.220290 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0"} Nov 28 17:34:50 crc kubenswrapper[4909]: I1128 17:34:50.220486 4909 scope.go:117] "RemoveContainer" containerID="0e4b2f3d8611bd0ab12d7c7c8d35db09f6a69551a7858ff3d66e84f2e15b12f2" Nov 28 17:34:51 crc kubenswrapper[4909]: I1128 17:34:51.233350 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"} Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.615722 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"] Nov 28 17:34:52 crc kubenswrapper[4909]: E1128 17:34:52.616292 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76a79b95-66aa-4e9c-b7dc-be5567975be4" containerName="mariadb-client-2" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.616316 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="76a79b95-66aa-4e9c-b7dc-be5567975be4" containerName="mariadb-client-2" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.616678 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="76a79b95-66aa-4e9c-b7dc-be5567975be4" containerName="mariadb-client-2" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.618567 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.622629 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"] Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.750062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj888\" (UniqueName: \"kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.750175 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.750244 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.851839 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.852215 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj888\" (UniqueName: \"kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.852258 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.852375 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.852642 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96" Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.871305 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hj888\" (UniqueName: \"kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888\") pod \"redhat-marketplace-z4z96\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") " pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:34:52 crc kubenswrapper[4909]: I1128 17:34:52.973335 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:34:53 crc kubenswrapper[4909]: I1128 17:34:53.395088 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"]
Nov 28 17:34:53 crc kubenswrapper[4909]: W1128 17:34:53.402002 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0eb0ddff_6903_431a_af4a_ffb46cb36009.slice/crio-00a890d5851bebf42e6fa96ff374e0d4edf63bab2f8114e356c6e58d9633680b WatchSource:0}: Error finding container 00a890d5851bebf42e6fa96ff374e0d4edf63bab2f8114e356c6e58d9633680b: Status 404 returned error can't find the container with id 00a890d5851bebf42e6fa96ff374e0d4edf63bab2f8114e356c6e58d9633680b
Nov 28 17:34:54 crc kubenswrapper[4909]: I1128 17:34:54.272772 4909 generic.go:334] "Generic (PLEG): container finished" podID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerID="ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38" exitCode=0
Nov 28 17:34:54 crc kubenswrapper[4909]: I1128 17:34:54.272844 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerDied","Data":"ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38"}
Nov 28 17:34:54 crc kubenswrapper[4909]: I1128 17:34:54.273080 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerStarted","Data":"00a890d5851bebf42e6fa96ff374e0d4edf63bab2f8114e356c6e58d9633680b"}
Nov 28 17:34:54 crc kubenswrapper[4909]: I1128 17:34:54.275056 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 17:34:56 crc kubenswrapper[4909]: I1128 17:34:56.291589 4909 generic.go:334] "Generic (PLEG): container finished" podID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerID="076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330" exitCode=0
Nov 28 17:34:56 crc kubenswrapper[4909]: I1128 17:34:56.292252 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerDied","Data":"076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330"}
Nov 28 17:34:58 crc kubenswrapper[4909]: I1128 17:34:58.323864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerStarted","Data":"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"}
Nov 28 17:34:58 crc kubenswrapper[4909]: I1128 17:34:58.341039 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z4z96" podStartSLOduration=3.511280189 podStartE2EDuration="6.341021276s" podCreationTimestamp="2025-11-28 17:34:52 +0000 UTC" firstStartedPulling="2025-11-28 17:34:54.274811122 +0000 UTC m=+5076.671495656" lastFinishedPulling="2025-11-28 17:34:57.104552219 +0000 UTC m=+5079.501236743" observedRunningTime="2025-11-28 17:34:58.339028652 +0000 UTC m=+5080.735713176" watchObservedRunningTime="2025-11-28 17:34:58.341021276 +0000 UTC m=+5080.737705800"
Nov 28 17:35:02 crc kubenswrapper[4909]: I1128 17:35:02.974108 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:02 crc kubenswrapper[4909]: I1128 17:35:02.974625 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:03 crc kubenswrapper[4909]: I1128 17:35:03.050407 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:03 crc kubenswrapper[4909]: I1128 17:35:03.420712 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:03 crc kubenswrapper[4909]: I1128 17:35:03.469399 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"]
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.393017 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z4z96" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="registry-server" containerID="cri-o://e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c" gracePeriod=2
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.786083 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.957997 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hj888\" (UniqueName: \"kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888\") pod \"0eb0ddff-6903-431a-af4a-ffb46cb36009\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") "
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.958071 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content\") pod \"0eb0ddff-6903-431a-af4a-ffb46cb36009\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") "
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.958203 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities\") pod \"0eb0ddff-6903-431a-af4a-ffb46cb36009\" (UID: \"0eb0ddff-6903-431a-af4a-ffb46cb36009\") "
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.958817 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities" (OuterVolumeSpecName: "utilities") pod "0eb0ddff-6903-431a-af4a-ffb46cb36009" (UID: "0eb0ddff-6903-431a-af4a-ffb46cb36009"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.964304 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888" (OuterVolumeSpecName: "kube-api-access-hj888") pod "0eb0ddff-6903-431a-af4a-ffb46cb36009" (UID: "0eb0ddff-6903-431a-af4a-ffb46cb36009"). InnerVolumeSpecName "kube-api-access-hj888". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:35:05 crc kubenswrapper[4909]: I1128 17:35:05.981688 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0eb0ddff-6903-431a-af4a-ffb46cb36009" (UID: "0eb0ddff-6903-431a-af4a-ffb46cb36009"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.059789 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hj888\" (UniqueName: \"kubernetes.io/projected/0eb0ddff-6903-431a-af4a-ffb46cb36009-kube-api-access-hj888\") on node \"crc\" DevicePath \"\""
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.059832 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.059849 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eb0ddff-6903-431a-af4a-ffb46cb36009-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.410985 4909 generic.go:334] "Generic (PLEG): container finished" podID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerID="e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c" exitCode=0
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.411059 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerDied","Data":"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"}
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.411095 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4z96"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.411162 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4z96" event={"ID":"0eb0ddff-6903-431a-af4a-ffb46cb36009","Type":"ContainerDied","Data":"00a890d5851bebf42e6fa96ff374e0d4edf63bab2f8114e356c6e58d9633680b"}
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.411216 4909 scope.go:117] "RemoveContainer" containerID="e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.444981 4909 scope.go:117] "RemoveContainer" containerID="076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.488578 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"]
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.492461 4909 scope.go:117] "RemoveContainer" containerID="ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.496307 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4z96"]
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.527237 4909 scope.go:117] "RemoveContainer" containerID="e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"
Nov 28 17:35:06 crc kubenswrapper[4909]: E1128 17:35:06.527890 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c\": container with ID starting with e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c not found: ID does not exist" containerID="e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.527963 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c"} err="failed to get container status \"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c\": rpc error: code = NotFound desc = could not find container \"e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c\": container with ID starting with e418c4daff75d2efd6f1917f2b387d50fd94e61b0693d14da1198accf8a0b26c not found: ID does not exist"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.528003 4909 scope.go:117] "RemoveContainer" containerID="076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330"
Nov 28 17:35:06 crc kubenswrapper[4909]: E1128 17:35:06.528546 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330\": container with ID starting with 076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330 not found: ID does not exist" containerID="076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.528587 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330"} err="failed to get container status \"076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330\": rpc error: code = NotFound desc = could not find container \"076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330\": container with ID starting with 076bce533bd8384c6bae4b786e86bdc0d43401137e57c266b1eb2e486c165330 not found: ID does not exist"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.528614 4909 scope.go:117] "RemoveContainer" containerID="ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38"
Nov 28 17:35:06 crc kubenswrapper[4909]: E1128 17:35:06.528984 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38\": container with ID starting with ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38 not found: ID does not exist" containerID="ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38"
Nov 28 17:35:06 crc kubenswrapper[4909]: I1128 17:35:06.529009 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38"} err="failed to get container status \"ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38\": rpc error: code = NotFound desc = could not find container \"ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38\": container with ID starting with ce718733418cfb61e20af4ae576ea3446fbaa206d6a7931af136c76cb573de38 not found: ID does not exist"
Nov 28 17:35:07 crc kubenswrapper[4909]: I1128 17:35:07.929274 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" path="/var/lib/kubelet/pods/0eb0ddff-6903-431a-af4a-ffb46cb36009/volumes"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.247524 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 17:36:26 crc kubenswrapper[4909]: E1128 17:36:26.248470 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="registry-server"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.248485 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="registry-server"
Nov 28 17:36:26 crc kubenswrapper[4909]: E1128 17:36:26.248507 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="extract-utilities"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.248516 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="extract-utilities"
Nov 28 17:36:26 crc kubenswrapper[4909]: E1128 17:36:26.248537 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="extract-content"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.248544 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="extract-content"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.248748 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eb0ddff-6903-431a-af4a-ffb46cb36009" containerName="registry-server"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.254794 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.254913 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.320330 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.320700 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l5rm\" (UniqueName: \"kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.320751 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.422071 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.422143 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.422204 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l5rm\" (UniqueName: \"kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.422813 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.422868 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.456055 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l5rm\" (UniqueName: \"kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm\") pod \"community-operators-w6v2w\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") " pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:26 crc kubenswrapper[4909]: I1128 17:36:26.582150 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:27 crc kubenswrapper[4909]: I1128 17:36:27.125540 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 17:36:27 crc kubenswrapper[4909]: I1128 17:36:27.138022 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerStarted","Data":"2cb3c0387a7ed18b7f7215d1e5457325772ae0bcde11b8b6c55622f3fea20a43"}
Nov 28 17:36:28 crc kubenswrapper[4909]: I1128 17:36:28.146009 4909 generic.go:334] "Generic (PLEG): container finished" podID="1117637d-0189-45f6-8998-083e64532df1" containerID="dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b" exitCode=0
Nov 28 17:36:28 crc kubenswrapper[4909]: I1128 17:36:28.146066 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerDied","Data":"dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b"}
Nov 28 17:36:32 crc kubenswrapper[4909]: I1128 17:36:32.182704 4909 generic.go:334] "Generic (PLEG): container finished" podID="1117637d-0189-45f6-8998-083e64532df1" containerID="668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1" exitCode=0
Nov 28 17:36:32 crc kubenswrapper[4909]: I1128 17:36:32.182791 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerDied","Data":"668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1"}
Nov 28 17:36:33 crc kubenswrapper[4909]: I1128 17:36:33.195268 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerStarted","Data":"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"}
Nov 28 17:36:33 crc kubenswrapper[4909]: I1128 17:36:33.219371 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-w6v2w" podStartSLOduration=2.754642022 podStartE2EDuration="7.21935442s" podCreationTimestamp="2025-11-28 17:36:26 +0000 UTC" firstStartedPulling="2025-11-28 17:36:28.149940677 +0000 UTC m=+5170.546625201" lastFinishedPulling="2025-11-28 17:36:32.614653045 +0000 UTC m=+5175.011337599" observedRunningTime="2025-11-28 17:36:33.217291875 +0000 UTC m=+5175.613976409" watchObservedRunningTime="2025-11-28 17:36:33.21935442 +0000 UTC m=+5175.616038954"
Nov 28 17:36:36 crc kubenswrapper[4909]: I1128 17:36:36.582406 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:36 crc kubenswrapper[4909]: I1128 17:36:36.582712 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:36 crc kubenswrapper[4909]: I1128 17:36:36.639919 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:37 crc kubenswrapper[4909]: I1128 17:36:37.291721 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 17:36:37 crc kubenswrapper[4909]: I1128 17:36:37.382751 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 17:36:37 crc kubenswrapper[4909]: I1128 17:36:37.445909 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 17:36:37 crc kubenswrapper[4909]: I1128 17:36:37.446328 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6bmn8" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="registry-server" containerID="cri-o://2912497954d6918ddf640af2cb9ce41701e3e051be1f6c6bdf11e21d9381b9f7" gracePeriod=2
Nov 28 17:36:38 crc kubenswrapper[4909]: I1128 17:36:38.244378 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerID="2912497954d6918ddf640af2cb9ce41701e3e051be1f6c6bdf11e21d9381b9f7" exitCode=0
Nov 28 17:36:38 crc kubenswrapper[4909]: I1128 17:36:38.244520 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerDied","Data":"2912497954d6918ddf640af2cb9ce41701e3e051be1f6c6bdf11e21d9381b9f7"}
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.033174 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.139968 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmwn8\" (UniqueName: \"kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8\") pod \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") "
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.140137 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities\") pod \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") "
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.140280 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content\") pod \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\" (UID: \"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8\") "
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.141098 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities" (OuterVolumeSpecName: "utilities") pod "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" (UID: "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.151959 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8" (OuterVolumeSpecName: "kube-api-access-bmwn8") pod "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" (UID: "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8"). InnerVolumeSpecName "kube-api-access-bmwn8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.183702 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" (UID: "6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.242194 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.242222 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmwn8\" (UniqueName: \"kubernetes.io/projected/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-kube-api-access-bmwn8\") on node \"crc\" DevicePath \"\""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.242234 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.256851 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6bmn8" event={"ID":"6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8","Type":"ContainerDied","Data":"54a04210e0277e79f28fa35f99b914cecb0b4537ea64a8e111a66c5311cffe7c"}
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.256936 4909 scope.go:117] "RemoveContainer" containerID="2912497954d6918ddf640af2cb9ce41701e3e051be1f6c6bdf11e21d9381b9f7"
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.256940 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6bmn8"
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.289994 4909 scope.go:117] "RemoveContainer" containerID="f2199381a8746b4f3caa80285db0fac38d8e0780ad53b9af86d7deb265352775"
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.298399 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.302693 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6bmn8"]
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.334336 4909 scope.go:117] "RemoveContainer" containerID="f19dfc8391e9d9a9b72eaa15b3ca393a5025e8e52f4c87b8c1bdc5df3e9a13fa"
Nov 28 17:36:39 crc kubenswrapper[4909]: I1128 17:36:39.913053 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" path="/var/lib/kubelet/pods/6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8/volumes"
Nov 28 17:37:19 crc kubenswrapper[4909]: I1128 17:37:19.910881 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:37:19 crc kubenswrapper[4909]: I1128 17:37:19.911514 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.251414 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 17:37:47 crc kubenswrapper[4909]: E1128 17:37:47.252809 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="registry-server"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.252837 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="registry-server"
Nov 28 17:37:47 crc kubenswrapper[4909]: E1128 17:37:47.252867 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="extract-utilities"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.252879 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="extract-utilities"
Nov 28 17:37:47 crc kubenswrapper[4909]: E1128 17:37:47.252905 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="extract-content"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.252918 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="extract-content"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.253229 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d8f5823-a0ca-4fcc-9b8e-265a045e6ab8" containerName="registry-server"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.254275 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.256589 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-j2qb5"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.262347 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.327039 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsm99\" (UniqueName: \"kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.327267 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.427884 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsm99\" (UniqueName: \"kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.428006 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.432743 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.432783 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/131c05f5c61807dbb62496dc0904dfaf560a6fd85f1742c1c59fec79ebc14072/globalmount\"" pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.456034 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsm99\" (UniqueName: \"kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.474273 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") pod \"mariadb-copy-data\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " pod="openstack/mariadb-copy-data"
Nov 28 17:37:47 crc kubenswrapper[4909]: I1128 17:37:47.581405 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 28 17:37:48 crc kubenswrapper[4909]: I1128 17:37:48.130816 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 17:37:48 crc kubenswrapper[4909]: I1128 17:37:48.940176 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"75370690-54d5-4283-8b1e-00deda652092","Type":"ContainerStarted","Data":"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a"}
Nov 28 17:37:48 crc kubenswrapper[4909]: I1128 17:37:48.940512 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"75370690-54d5-4283-8b1e-00deda652092","Type":"ContainerStarted","Data":"b45ae6fd09d28c090cc1f4512ceb47fe164a8c2e0142f9c3b57e4f61fedb930a"}
Nov 28 17:37:48 crc kubenswrapper[4909]: I1128 17:37:48.962957 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=2.96293273 podStartE2EDuration="2.96293273s" podCreationTimestamp="2025-11-28 17:37:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:37:48.958245344 +0000 UTC m=+5251.354929898" watchObservedRunningTime="2025-11-28 17:37:48.96293273 +0000 UTC m=+5251.359617294"
Nov 28 17:37:49 crc kubenswrapper[4909]: I1128 17:37:49.911078 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:37:49 crc kubenswrapper[4909]: I1128 17:37:49.911152 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.017725 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.021527 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.033638 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.114925 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thcv2\" (UniqueName: \"kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2\") pod \"mariadb-client\" (UID: \"230ef725-4ede-4337-ac8c-88c2b4ba3aba\") " pod="openstack/mariadb-client"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.217050 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thcv2\" (UniqueName: \"kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2\") pod \"mariadb-client\" (UID: \"230ef725-4ede-4337-ac8c-88c2b4ba3aba\") " pod="openstack/mariadb-client"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.238462 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thcv2\" (UniqueName: \"kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2\") pod \"mariadb-client\" (UID: \"230ef725-4ede-4337-ac8c-88c2b4ba3aba\") " pod="openstack/mariadb-client"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.347080 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.837761 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:52 crc kubenswrapper[4909]: W1128 17:37:52.846926 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod230ef725_4ede_4337_ac8c_88c2b4ba3aba.slice/crio-191b40af79430a4822632730e217a6709071d0f95d6c957be9367c6d02657bcc WatchSource:0}: Error finding container 191b40af79430a4822632730e217a6709071d0f95d6c957be9367c6d02657bcc: Status 404 returned error can't find the container with id 191b40af79430a4822632730e217a6709071d0f95d6c957be9367c6d02657bcc
Nov 28 17:37:52 crc kubenswrapper[4909]: I1128 17:37:52.976759 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"230ef725-4ede-4337-ac8c-88c2b4ba3aba","Type":"ContainerStarted","Data":"191b40af79430a4822632730e217a6709071d0f95d6c957be9367c6d02657bcc"}
Nov 28 17:37:53 crc kubenswrapper[4909]: I1128 17:37:53.990532 4909 generic.go:334] "Generic (PLEG): container finished" podID="230ef725-4ede-4337-ac8c-88c2b4ba3aba" containerID="219f3978d08a00bb017323a8fc837467ff06618f5e04a407c8d468fbe56b2676" exitCode=0
Nov 28 17:37:53 crc kubenswrapper[4909]: I1128 17:37:53.990635 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"230ef725-4ede-4337-ac8c-88c2b4ba3aba","Type":"ContainerDied","Data":"219f3978d08a00bb017323a8fc837467ff06618f5e04a407c8d468fbe56b2676"}
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.397187 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.425438 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_230ef725-4ede-4337-ac8c-88c2b4ba3aba/mariadb-client/0.log"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.462250 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.469377 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.471141 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thcv2\" (UniqueName: \"kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2\") pod \"230ef725-4ede-4337-ac8c-88c2b4ba3aba\" (UID: \"230ef725-4ede-4337-ac8c-88c2b4ba3aba\") "
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.477071 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2" (OuterVolumeSpecName: "kube-api-access-thcv2") pod "230ef725-4ede-4337-ac8c-88c2b4ba3aba" (UID: "230ef725-4ede-4337-ac8c-88c2b4ba3aba"). InnerVolumeSpecName "kube-api-access-thcv2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.574272 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thcv2\" (UniqueName: \"kubernetes.io/projected/230ef725-4ede-4337-ac8c-88c2b4ba3aba-kube-api-access-thcv2\") on node \"crc\" DevicePath \"\""
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.581818 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:55 crc kubenswrapper[4909]: E1128 17:37:55.582137 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="230ef725-4ede-4337-ac8c-88c2b4ba3aba" containerName="mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.582157 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="230ef725-4ede-4337-ac8c-88c2b4ba3aba" containerName="mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.582367 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="230ef725-4ede-4337-ac8c-88c2b4ba3aba" containerName="mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.583117 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.591355 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.675900 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqsc9\" (UniqueName: \"kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9\") pod \"mariadb-client\" (UID: \"0e9ec9e4-4426-43a4-b82b-3a111e84c50e\") " pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.778216 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqsc9\" (UniqueName: \"kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9\") pod \"mariadb-client\" (UID: \"0e9ec9e4-4426-43a4-b82b-3a111e84c50e\") " pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.796992 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqsc9\" (UniqueName: \"kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9\") pod \"mariadb-client\" (UID: \"0e9ec9e4-4426-43a4-b82b-3a111e84c50e\") " pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.913161 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:55 crc kubenswrapper[4909]: I1128 17:37:55.914996 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="230ef725-4ede-4337-ac8c-88c2b4ba3aba" path="/var/lib/kubelet/pods/230ef725-4ede-4337-ac8c-88c2b4ba3aba/volumes"
Nov 28 17:37:56 crc kubenswrapper[4909]: I1128 17:37:56.009272 4909 scope.go:117] "RemoveContainer" containerID="219f3978d08a00bb017323a8fc837467ff06618f5e04a407c8d468fbe56b2676"
Nov 28 17:37:56 crc kubenswrapper[4909]: I1128 17:37:56.009437 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:56 crc kubenswrapper[4909]: I1128 17:37:56.384008 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:56 crc kubenswrapper[4909]: W1128 17:37:56.389496 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e9ec9e4_4426_43a4_b82b_3a111e84c50e.slice/crio-4950fe943353b2ab3621c394d0e8b898217f778c1250a2c6f5730fa04ceb90e8 WatchSource:0}: Error finding container 4950fe943353b2ab3621c394d0e8b898217f778c1250a2c6f5730fa04ceb90e8: Status 404 returned error can't find the container with id 4950fe943353b2ab3621c394d0e8b898217f778c1250a2c6f5730fa04ceb90e8
Nov 28 17:37:57 crc kubenswrapper[4909]: I1128 17:37:57.017212 4909 generic.go:334] "Generic (PLEG): container finished" podID="0e9ec9e4-4426-43a4-b82b-3a111e84c50e" containerID="90255f395c207fb48cbd301c57bd28118c5945ce5f5d995175d976577870855f" exitCode=0
Nov 28 17:37:57 crc kubenswrapper[4909]: I1128 17:37:57.017252 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0e9ec9e4-4426-43a4-b82b-3a111e84c50e","Type":"ContainerDied","Data":"90255f395c207fb48cbd301c57bd28118c5945ce5f5d995175d976577870855f"}
Nov 28 17:37:57 crc kubenswrapper[4909]: I1128 17:37:57.017274 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0e9ec9e4-4426-43a4-b82b-3a111e84c50e","Type":"ContainerStarted","Data":"4950fe943353b2ab3621c394d0e8b898217f778c1250a2c6f5730fa04ceb90e8"}
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.313327 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.332608 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_0e9ec9e4-4426-43a4-b82b-3a111e84c50e/mariadb-client/0.log"
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.359549 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.367374 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.416252 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqsc9\" (UniqueName: \"kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9\") pod \"0e9ec9e4-4426-43a4-b82b-3a111e84c50e\" (UID: \"0e9ec9e4-4426-43a4-b82b-3a111e84c50e\") "
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.424642 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9" (OuterVolumeSpecName: "kube-api-access-rqsc9") pod "0e9ec9e4-4426-43a4-b82b-3a111e84c50e" (UID: "0e9ec9e4-4426-43a4-b82b-3a111e84c50e"). InnerVolumeSpecName "kube-api-access-rqsc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:37:58 crc kubenswrapper[4909]: I1128 17:37:58.519410 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqsc9\" (UniqueName: \"kubernetes.io/projected/0e9ec9e4-4426-43a4-b82b-3a111e84c50e-kube-api-access-rqsc9\") on node \"crc\" DevicePath \"\""
Nov 28 17:37:59 crc kubenswrapper[4909]: I1128 17:37:59.036881 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4950fe943353b2ab3621c394d0e8b898217f778c1250a2c6f5730fa04ceb90e8"
Nov 28 17:37:59 crc kubenswrapper[4909]: I1128 17:37:59.036979 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 17:37:59 crc kubenswrapper[4909]: I1128 17:37:59.913292 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e9ec9e4-4426-43a4-b82b-3a111e84c50e" path="/var/lib/kubelet/pods/0e9ec9e4-4426-43a4-b82b-3a111e84c50e/volumes"
Nov 28 17:38:19 crc kubenswrapper[4909]: I1128 17:38:19.911867 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:38:19 crc kubenswrapper[4909]: I1128 17:38:19.913927 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:38:19 crc kubenswrapper[4909]: I1128 17:38:19.914018 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 17:38:19 crc kubenswrapper[4909]: I1128 17:38:19.914916 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 17:38:19 crc kubenswrapper[4909]: I1128 17:38:19.915027 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" gracePeriod=600
Nov 28 17:38:20 crc kubenswrapper[4909]: E1128 17:38:20.055448 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:38:20 crc kubenswrapper[4909]: I1128 17:38:20.204674 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" exitCode=0
Nov 28 17:38:20 crc kubenswrapper[4909]: I1128 17:38:20.204696 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"}
Nov 28 17:38:20 crc kubenswrapper[4909]: I1128 17:38:20.205070 4909 scope.go:117] "RemoveContainer" containerID="8bbf3ce168c14b8e2f40c7fde54da632f20fb2f06731b49dfa3ed5fbcc06a8e0"
Nov 28 17:38:20 crc kubenswrapper[4909]: I1128 17:38:20.205877 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"
Nov 28 17:38:20 crc kubenswrapper[4909]: E1128 17:38:20.206322 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:38:31 crc kubenswrapper[4909]: I1128 17:38:31.902893 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"
Nov 28 17:38:31 crc kubenswrapper[4909]: E1128 17:38:31.903864 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.494951 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 17:38:35 crc kubenswrapper[4909]: E1128 17:38:35.495581 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e9ec9e4-4426-43a4-b82b-3a111e84c50e" containerName="mariadb-client"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.495600 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e9ec9e4-4426-43a4-b82b-3a111e84c50e" containerName="mariadb-client"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.495795 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e9ec9e4-4426-43a4-b82b-3a111e84c50e" containerName="mariadb-client"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.496536 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.499535 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.499906 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.499920 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-26ph7"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.519394 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.521061 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.527146 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.535445 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.537964 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.546412 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.558678 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.612180 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ae6c4b3-7956-4597-b15b-de51255c1273-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.612560 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.612673 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.612816 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.612960 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-config\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613106 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67wtj\" (UniqueName: \"kubernetes.io/projected/01dcb944-56d9-4239-9050-a85af687e4af-kube-api-access-67wtj\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613359 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75d87\" (UniqueName: \"kubernetes.io/projected/8ae6c4b3-7956-4597-b15b-de51255c1273-kube-api-access-75d87\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613442 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01dcb944-56d9-4239-9050-a85af687e4af-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613467 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-config\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613495 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613533 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/01dcb944-56d9-4239-9050-a85af687e4af-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613733 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c727c629-0df5-4add-a305-fc312c178a75\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c727c629-0df5-4add-a305-fc312c178a75\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613876 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae6c4b3-7956-4597-b15b-de51255c1273-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.613909 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxvft\" (UniqueName: \"kubernetes.io/projected/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-kube-api-access-cxvft\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.614010 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-config\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.614087 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.614152 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-79c45c94-680e-4310-b947-f949ff98e832\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-79c45c94-680e-4310-b947-f949ff98e832\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.614174 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715487 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75d87\" (UniqueName: \"kubernetes.io/projected/8ae6c4b3-7956-4597-b15b-de51255c1273-kube-api-access-75d87\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715546 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01dcb944-56d9-4239-9050-a85af687e4af-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715571 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-config\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715586 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715608 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/01dcb944-56d9-4239-9050-a85af687e4af-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c727c629-0df5-4add-a305-fc312c178a75\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c727c629-0df5-4add-a305-fc312c178a75\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715691 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxvft\" (UniqueName: \"kubernetes.io/projected/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-kube-api-access-cxvft\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715708 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae6c4b3-7956-4597-b15b-de51255c1273-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715732 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-config\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715756 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-79c45c94-680e-4310-b947-f949ff98e832\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-79c45c94-680e-4310-b947-f949ff98e832\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715808 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715863 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ae6c4b3-7956-4597-b15b-de51255c1273-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715883 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715907 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715927 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715956 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-config\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.715984 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67wtj\" (UniqueName: \"kubernetes.io/projected/01dcb944-56d9-4239-9050-a85af687e4af-kube-api-access-67wtj\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.718610 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-config\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.719299 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01dcb944-56d9-4239-9050-a85af687e4af-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.719658 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/01dcb944-56d9-4239-9050-a85af687e4af-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.719822 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-config\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.720142 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.720970 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ae6c4b3-7956-4597-b15b-de51255c1273-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.721451 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-config\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.721620 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.722220 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ae6c4b3-7956-4597-b15b-de51255c1273-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.723071 4909 csi_attacher.go:380] kubernetes.io/csi:
attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.723099 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c727c629-0df5-4add-a305-fc312c178a75\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c727c629-0df5-4add-a305-fc312c178a75\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f4490e6ade9900881de7dffd2f6ebef1e420cf83776a2646cc137ba167d95ce9/globalmount\"" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.723270 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.723318 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-79c45c94-680e-4310-b947-f949ff98e832\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-79c45c94-680e-4310-b947-f949ff98e832\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6d1f58c2e6a6723fad5b25066c677a84f0ed2351ba6314bb242db29f36852a5/globalmount\"" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.724559 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.746215 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.746282 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/aa15f0e308865b59a27092d06590105d5fe9703b6c9f8d074ff57dd2a0625326/globalmount\"" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.748278 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.749307 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01dcb944-56d9-4239-9050-a85af687e4af-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.749855 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae6c4b3-7956-4597-b15b-de51255c1273-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.766695 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75d87\" (UniqueName: \"kubernetes.io/projected/8ae6c4b3-7956-4597-b15b-de51255c1273-kube-api-access-75d87\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.773690 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.754880 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxvft\" (UniqueName: \"kubernetes.io/projected/425f39fc-d77b-4a11-b79e-30ae4dbad3c9-kube-api-access-cxvft\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.782061 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.784869 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67wtj\" (UniqueName: \"kubernetes.io/projected/01dcb944-56d9-4239-9050-a85af687e4af-kube-api-access-67wtj\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.785682 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.785890 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-2hjnq" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.790806 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.806209 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.807593 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.808732 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-266e4607-fc49-4b59-b361-7820d8dd5dd7\") pod \"ovsdbserver-nb-1\" (UID: \"8ae6c4b3-7956-4597-b15b-de51255c1273\") " pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.813284 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c727c629-0df5-4add-a305-fc312c178a75\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c727c629-0df5-4add-a305-fc312c178a75\") pod \"ovsdbserver-nb-0\" (UID: \"425f39fc-d77b-4a11-b79e-30ae4dbad3c9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.813561 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-79c45c94-680e-4310-b947-f949ff98e832\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-79c45c94-680e-4310-b947-f949ff98e832\") pod \"ovsdbserver-nb-2\" (UID: \"01dcb944-56d9-4239-9050-a85af687e4af\") " pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.816393 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817432 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvjl9\" (UniqueName: \"kubernetes.io/projected/81b7dd2b-5e9c-454b-a12f-584bad52d211-kube-api-access-zvjl9\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 
17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817641 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b7dd2b-5e9c-454b-a12f-584bad52d211-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817752 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817858 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/81b7dd2b-5e9c-454b-a12f-584bad52d211-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817873 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817912 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.817983 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-config\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.822806 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.823066 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.831769 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.841238 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.861442 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919171 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919215 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-43262620-2c61-4be7-85bd-ae926feda4d3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-43262620-2c61-4be7-85bd-ae926feda4d3\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919258 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919283 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b7dd2b-5e9c-454b-a12f-584bad52d211-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919358 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/81b7dd2b-5e9c-454b-a12f-584bad52d211-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919376 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpd89\" (UniqueName: \"kubernetes.io/projected/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-kube-api-access-wpd89\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919409 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-config\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919444 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 
17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919465 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-config\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919492 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919518 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhpgl\" (UniqueName: \"kubernetes.io/projected/501619a3-d9ee-4809-9fe6-1e0170341225-kube-api-access-qhpgl\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-config\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919574 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919605 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919626 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501619a3-d9ee-4809-9fe6-1e0170341225-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919647 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvjl9\" (UniqueName: \"kubernetes.io/projected/81b7dd2b-5e9c-454b-a12f-584bad52d211-kube-api-access-zvjl9\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.919672 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/501619a3-d9ee-4809-9fe6-1e0170341225-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.923031 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/81b7dd2b-5e9c-454b-a12f-584bad52d211-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.925188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.925764 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.925910 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30e12c8c44805d4e0587375796940e7c4cf3430cb5801b96efe0eae0fba88f70/globalmount\"" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.926399 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81b7dd2b-5e9c-454b-a12f-584bad52d211-config\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.926446 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b7dd2b-5e9c-454b-a12f-584bad52d211-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.947823 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvjl9\" (UniqueName: \"kubernetes.io/projected/81b7dd2b-5e9c-454b-a12f-584bad52d211-kube-api-access-zvjl9\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:35 crc kubenswrapper[4909]: I1128 17:38:35.980404 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f720-e1c0-4839-b9c7-e403c100111b\") pod \"ovsdbserver-sb-0\" (UID: \"81b7dd2b-5e9c-454b-a12f-584bad52d211\") " pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022493 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-config\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022569 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-config\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022587 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhpgl\" (UniqueName: \"kubernetes.io/projected/501619a3-d9ee-4809-9fe6-1e0170341225-kube-api-access-qhpgl\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022636 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022660 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022695 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501619a3-d9ee-4809-9fe6-1e0170341225-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022722 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/501619a3-d9ee-4809-9fe6-1e0170341225-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022744 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022765 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-43262620-2c61-4be7-85bd-ae926feda4d3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-43262620-2c61-4be7-85bd-ae926feda4d3\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022795 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.022833 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpd89\" (UniqueName: 
\"kubernetes.io/projected/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-kube-api-access-wpd89\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.025068 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/501619a3-d9ee-4809-9fe6-1e0170341225-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.025114 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-config\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.026475 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-config\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.026664 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.027101 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.027537 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.027590 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2e59a1b89ca7f1ad4dcafadf09aa93f2a6e882edf7f8ff9e08402652c26b6f11/globalmount\"" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.027797 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/501619a3-d9ee-4809-9fe6-1e0170341225-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.029004 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501619a3-d9ee-4809-9fe6-1e0170341225-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.036113 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.036146 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-43262620-2c61-4be7-85bd-ae926feda4d3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-43262620-2c61-4be7-85bd-ae926feda4d3\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e4807761c3c1c7c706e5e7aad2d5e2ae4eb4ef7ba27b7040663fdf5607431a39/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.039466 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.041582 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpd89\" (UniqueName: \"kubernetes.io/projected/0307a9d2-1a13-4d90-a52a-206ef5a80f4b-kube-api-access-wpd89\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.042363 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhpgl\" (UniqueName: \"kubernetes.io/projected/501619a3-d9ee-4809-9fe6-1e0170341225-kube-api-access-qhpgl\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.067534 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-16310397-215b-4d9c-aa2e-d0f4b5aefa39\") pod \"ovsdbserver-sb-1\" (UID: \"0307a9d2-1a13-4d90-a52a-206ef5a80f4b\") " 
pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.068013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-43262620-2c61-4be7-85bd-ae926feda4d3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-43262620-2c61-4be7-85bd-ae926feda4d3\") pod \"ovsdbserver-sb-2\" (UID: \"501619a3-d9ee-4809-9fe6-1e0170341225\") " pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.149932 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.242387 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.260939 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.403513 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.479689 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 17:38:36 crc kubenswrapper[4909]: W1128 17:38:36.486402 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01dcb944_56d9_4239_9050_a85af687e4af.slice/crio-c579abd98a2829d67fc254f0558ea34b4923638ac513d20f2b48ce3df0185fa6 WatchSource:0}: Error finding container c579abd98a2829d67fc254f0558ea34b4923638ac513d20f2b48ce3df0185fa6: Status 404 returned error can't find the container with id c579abd98a2829d67fc254f0558ea34b4923638ac513d20f2b48ce3df0185fa6 Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.682023 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 17:38:36 crc kubenswrapper[4909]: W1128 17:38:36.689475 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81b7dd2b_5e9c_454b_a12f_584bad52d211.slice/crio-b42ffdf5440887dec45ec74450b925a1bda581e7fab88d2eee7ecf26aa3f9ff1 WatchSource:0}: Error finding container b42ffdf5440887dec45ec74450b925a1bda581e7fab88d2eee7ecf26aa3f9ff1: Status 404 returned error can't find the container with id b42ffdf5440887dec45ec74450b925a1bda581e7fab88d2eee7ecf26aa3f9ff1 Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.768477 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 17:38:36 crc kubenswrapper[4909]: W1128 17:38:36.775872 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0307a9d2_1a13_4d90_a52a_206ef5a80f4b.slice/crio-3e30424630b09ee2070aca18cf6a8cb3f47217cbe09004b23fcb2dc1f61b4ca8 WatchSource:0}: Error finding container 3e30424630b09ee2070aca18cf6a8cb3f47217cbe09004b23fcb2dc1f61b4ca8: Status 404 returned error can't find the container with id 3e30424630b09ee2070aca18cf6a8cb3f47217cbe09004b23fcb2dc1f61b4ca8 Nov 28 17:38:36 crc kubenswrapper[4909]: I1128 17:38:36.873934 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.085063 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.365755 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"8ae6c4b3-7956-4597-b15b-de51255c1273","Type":"ContainerStarted","Data":"221c581b345bb637f8e3d2af8a9c8c1afa14fd0070354c78ce8c25955f01ecb0"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.366077 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"8ae6c4b3-7956-4597-b15b-de51255c1273","Type":"ContainerStarted","Data":"8e5789beaef92787ffb5e6c18248e430401761cb2be2022a39f9ccc054179667"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.366093 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"8ae6c4b3-7956-4597-b15b-de51255c1273","Type":"ContainerStarted","Data":"ed110d3cfe0ddc162ff499d79ff3dc9038530288f6518e20945592df06050fa8"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.376250 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"425f39fc-d77b-4a11-b79e-30ae4dbad3c9","Type":"ContainerStarted","Data":"966ad48d4bc2133f02a420e19c21c86fef215382a58e99b1ec718ebbf3e26e2a"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.376302 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"425f39fc-d77b-4a11-b79e-30ae4dbad3c9","Type":"ContainerStarted","Data":"3397332e9635fe6cefc7090e0b8447b968da0d8cd1b3109a0bfda3947ef325f9"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.385087 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"501619a3-d9ee-4809-9fe6-1e0170341225","Type":"ContainerStarted","Data":"5b3493adb5070b588f652c4bcd70c6230932605932407351318e2fe993c10795"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.385154 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"501619a3-d9ee-4809-9fe6-1e0170341225","Type":"ContainerStarted","Data":"56b96d893825f05cbd9b4a96c0f3ce069d4cb4a904e841a71670451d8f912e4e"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.385165 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"501619a3-d9ee-4809-9fe6-1e0170341225","Type":"ContainerStarted","Data":"ee7597e4c4f5c7716e286b9631e0f3ffa87910e1cc048a94a3076d3b152b4ae8"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.388940 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"01dcb944-56d9-4239-9050-a85af687e4af","Type":"ContainerStarted","Data":"eabc33bd95591fe8e00eb09305f4285a2539df88f89c32d9994cb51cb2d6b0ea"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.388983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"01dcb944-56d9-4239-9050-a85af687e4af","Type":"ContainerStarted","Data":"8532b1c2e6f4f1d68f3cc71adb4785a03b3368012902cd388cbada860b08ab69"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.388997 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"01dcb944-56d9-4239-9050-a85af687e4af","Type":"ContainerStarted","Data":"c579abd98a2829d67fc254f0558ea34b4923638ac513d20f2b48ce3df0185fa6"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.392727 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" 
event={"ID":"0307a9d2-1a13-4d90-a52a-206ef5a80f4b","Type":"ContainerStarted","Data":"5b921c2150cc8d081d9d2ca77afcaecf69c5441075a2115f2bcb3f188a4e8de7"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.392782 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"0307a9d2-1a13-4d90-a52a-206ef5a80f4b","Type":"ContainerStarted","Data":"a1d56b06436996b3763c0461eb47be988a1c8c9bcc284bf80cc83b44afeb936c"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.392803 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"0307a9d2-1a13-4d90-a52a-206ef5a80f4b","Type":"ContainerStarted","Data":"3e30424630b09ee2070aca18cf6a8cb3f47217cbe09004b23fcb2dc1f61b4ca8"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.395094 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"81b7dd2b-5e9c-454b-a12f-584bad52d211","Type":"ContainerStarted","Data":"9e724303cc03bd004b1c143d758a216a5942806a61524bf5c539d135125624af"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.395145 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"81b7dd2b-5e9c-454b-a12f-584bad52d211","Type":"ContainerStarted","Data":"5b9017852f8ba1f1b693229a3f934cd346a2f3258889dae53e1d7540b30e73ed"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.395160 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"81b7dd2b-5e9c-454b-a12f-584bad52d211","Type":"ContainerStarted","Data":"b42ffdf5440887dec45ec74450b925a1bda581e7fab88d2eee7ecf26aa3f9ff1"} Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.400054 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.400030034 podStartE2EDuration="3.400030034s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:37.393048367 +0000 UTC m=+5299.789732891" watchObservedRunningTime="2025-11-28 17:38:37.400030034 +0000 UTC m=+5299.796714558" Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.416779 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.416761723 podStartE2EDuration="3.416761723s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:37.416104486 +0000 UTC m=+5299.812789030" watchObservedRunningTime="2025-11-28 17:38:37.416761723 +0000 UTC m=+5299.813446247" Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.432089 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.432071145 podStartE2EDuration="3.432071145s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:37.431115149 +0000 UTC m=+5299.827799673" watchObservedRunningTime="2025-11-28 17:38:37.432071145 +0000 UTC m=+5299.828755689" Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.449105 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.449087852 
podStartE2EDuration="3.449087852s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:37.446324457 +0000 UTC m=+5299.843009001" watchObservedRunningTime="2025-11-28 17:38:37.449087852 +0000 UTC m=+5299.845772376" Nov 28 17:38:37 crc kubenswrapper[4909]: I1128 17:38:37.463101 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.463080508 podStartE2EDuration="3.463080508s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:37.462089961 +0000 UTC m=+5299.858774505" watchObservedRunningTime="2025-11-28 17:38:37.463080508 +0000 UTC m=+5299.859765042" Nov 28 17:38:38 crc kubenswrapper[4909]: I1128 17:38:38.407845 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"425f39fc-d77b-4a11-b79e-30ae4dbad3c9","Type":"ContainerStarted","Data":"422579711af9713bf681f662f5a13dae44b90fac7eab1fbf943e95d27d35828b"} Nov 28 17:38:38 crc kubenswrapper[4909]: I1128 17:38:38.440923 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=4.440904011 podStartE2EDuration="4.440904011s" podCreationTimestamp="2025-11-28 17:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:38.433495751 +0000 UTC m=+5300.830180305" watchObservedRunningTime="2025-11-28 17:38:38.440904011 +0000 UTC m=+5300.837588545" Nov 28 17:38:38 crc kubenswrapper[4909]: I1128 17:38:38.824049 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:38 crc kubenswrapper[4909]: I1128 17:38:38.841311 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:38 crc kubenswrapper[4909]: I1128 17:38:38.862459 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:39 crc kubenswrapper[4909]: I1128 17:38:39.150215 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:39 crc kubenswrapper[4909]: I1128 17:38:39.244038 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:39 crc kubenswrapper[4909]: I1128 17:38:39.261360 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:40 crc kubenswrapper[4909]: I1128 17:38:40.824214 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:40 crc kubenswrapper[4909]: I1128 17:38:40.841628 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:40 crc kubenswrapper[4909]: I1128 17:38:40.862720 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.150070 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.243997 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.261757 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.893060 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.918203 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.918320 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.957971 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 28 17:38:41 crc kubenswrapper[4909]: I1128 17:38:41.959141 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.133965 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.135718 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.137922 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.147745 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.191524 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.230894 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.235303 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.235347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjw26\" (UniqueName: \"kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.235389 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.235479 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.283348 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.309969 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.336971 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.337030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjw26\" (UniqueName: \"kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.337086 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.337146 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.338061 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.338246 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.338313 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.339402 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.377980 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 28 17:38:42 crc 
kubenswrapper[4909]: I1128 17:38:42.379172 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjw26\" (UniqueName: \"kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26\") pod \"dnsmasq-dns-bfdfb7757-lzfpj\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.462236 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.506042 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.531393 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.536263 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.539805 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.542130 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.571148 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.760169 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.760581 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7bvt\" (UniqueName: \"kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.760736 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.760860 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.760920 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " 
pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.862757 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.862814 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.862875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.862898 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7bvt\" (UniqueName: \"kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.862944 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.863858 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.863896 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.864463 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.864594 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.885014 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7bvt\" (UniqueName: \"kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt\") pod \"dnsmasq-dns-6bb899f49f-86csq\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:42 crc kubenswrapper[4909]: I1128 17:38:42.965168 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.174551 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.459850 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:38:43 crc kubenswrapper[4909]: W1128 17:38:43.467122 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78ed4655_73ba_4c46_9d91_8e0f4dfbe258.slice/crio-eca675926d33382d1769be77fea9d6b9634cdb1c549f3a55a09c3fa6a531eaa6 WatchSource:0}: Error finding container eca675926d33382d1769be77fea9d6b9634cdb1c549f3a55a09c3fa6a531eaa6: Status 404 returned error can't find the container with id eca675926d33382d1769be77fea9d6b9634cdb1c549f3a55a09c3fa6a531eaa6 Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.473326 4909 generic.go:334] "Generic (PLEG): container finished" podID="43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" containerID="32cd2c20cbea58e9fe4b04eb4dc4c86c8e9a1cf780baec5e00f4e3a696fc9834" exitCode=0 Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.473456 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" event={"ID":"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33","Type":"ContainerDied","Data":"32cd2c20cbea58e9fe4b04eb4dc4c86c8e9a1cf780baec5e00f4e3a696fc9834"} Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.473567 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" event={"ID":"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33","Type":"ContainerStarted","Data":"6989dbabdb3050cbc4e01dcfdaf53cbfe14d9f3828cb043b64945ebd20ddde5a"} Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.766966 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.879424 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc\") pod \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.879537 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb\") pod \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.879632 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjw26\" (UniqueName: \"kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26\") pod \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.880228 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config\") pod \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\" (UID: \"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33\") " Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.884581 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26" (OuterVolumeSpecName: "kube-api-access-jjw26") pod "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" (UID: "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33"). InnerVolumeSpecName "kube-api-access-jjw26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.898002 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config" (OuterVolumeSpecName: "config") pod "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" (UID: "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.898068 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" (UID: "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.904837 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" (UID: "43ea72ed-b5ab-48e1-a8f6-a7ff333ded33"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.983767 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.985117 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjw26\" (UniqueName: \"kubernetes.io/projected/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-kube-api-access-jjw26\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.985183 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:43 crc kubenswrapper[4909]: I1128 17:38:43.985236 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.482487 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" event={"ID":"43ea72ed-b5ab-48e1-a8f6-a7ff333ded33","Type":"ContainerDied","Data":"6989dbabdb3050cbc4e01dcfdaf53cbfe14d9f3828cb043b64945ebd20ddde5a"} Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.482539 4909 scope.go:117] "RemoveContainer" containerID="32cd2c20cbea58e9fe4b04eb4dc4c86c8e9a1cf780baec5e00f4e3a696fc9834" Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.482560 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bfdfb7757-lzfpj" Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.487416 4909 generic.go:334] "Generic (PLEG): container finished" podID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerID="a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8" exitCode=0 Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.487473 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" event={"ID":"78ed4655-73ba-4c46-9d91-8e0f4dfbe258","Type":"ContainerDied","Data":"a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8"} Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.487502 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" event={"ID":"78ed4655-73ba-4c46-9d91-8e0f4dfbe258","Type":"ContainerStarted","Data":"eca675926d33382d1769be77fea9d6b9634cdb1c549f3a55a09c3fa6a531eaa6"} Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.611777 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:44 crc kubenswrapper[4909]: I1128 17:38:44.628589 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bfdfb7757-lzfpj"] Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.496580 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" event={"ID":"78ed4655-73ba-4c46-9d91-8e0f4dfbe258","Type":"ContainerStarted","Data":"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42"} Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.520291 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" podStartSLOduration=3.520272963 
podStartE2EDuration="3.520272963s" podCreationTimestamp="2025-11-28 17:38:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:45.513803439 +0000 UTC m=+5307.910487973" watchObservedRunningTime="2025-11-28 17:38:45.520272963 +0000 UTC m=+5307.916957487" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.534088 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 28 17:38:45 crc kubenswrapper[4909]: E1128 17:38:45.534609 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" containerName="init" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.534639 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" containerName="init" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.534941 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" containerName="init" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.535982 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.538559 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.546683 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.618492 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.618573 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.618844 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxtsg\" (UniqueName: \"kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.720150 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxtsg\" (UniqueName: \"kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.720228 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.720265 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.725652 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.735224 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.735263 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f4759c8f74398915934868e88016027d47ae7f403782311bbccc480d30fdb387/globalmount\"" pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.751469 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxtsg\" (UniqueName: \"kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.764622 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") pod \"ovn-copy-data\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.871219 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.905759 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:38:45 crc kubenswrapper[4909]: E1128 17:38:45.906004 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:38:45 crc kubenswrapper[4909]: I1128 17:38:45.920816 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43ea72ed-b5ab-48e1-a8f6-a7ff333ded33" path="/var/lib/kubelet/pods/43ea72ed-b5ab-48e1-a8f6-a7ff333ded33/volumes" Nov 28 17:38:46 crc kubenswrapper[4909]: I1128 17:38:46.375768 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 17:38:46 crc kubenswrapper[4909]: I1128 17:38:46.507455 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"bb430976-530e-466d-be9a-7cb07fb560e7","Type":"ContainerStarted","Data":"b927fc2b6ae1035c902f45ef1b75f6fe0d565a884178ff30cf20c057d6c41803"} Nov 28 17:38:46 crc kubenswrapper[4909]: I1128 17:38:46.507843 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:47 crc kubenswrapper[4909]: I1128 17:38:47.525122 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"bb430976-530e-466d-be9a-7cb07fb560e7","Type":"ContainerStarted","Data":"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f"} Nov 28 17:38:47 crc kubenswrapper[4909]: I1128 17:38:47.549848 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.549823924 podStartE2EDuration="3.549823924s" podCreationTimestamp="2025-11-28 17:38:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:47.543827743 +0000 UTC m=+5309.940512287" watchObservedRunningTime="2025-11-28 17:38:47.549823924 +0000 UTC m=+5309.946508458" Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.871211 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.873247 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.876559 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-xg89r" Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.876884 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.877026 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 17:38:52 crc kubenswrapper[4909]: I1128 17:38:52.897225 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.054637 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pttp\" (UniqueName: \"kubernetes.io/projected/8b509e35-cec7-4668-8421-07678240db3b-kube-api-access-2pttp\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.054795 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b509e35-cec7-4668-8421-07678240db3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.054820 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-config\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.054855 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8b509e35-cec7-4668-8421-07678240db3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.054895 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-scripts\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.156899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b509e35-cec7-4668-8421-07678240db3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.156950 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-config\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.157002 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8b509e35-cec7-4668-8421-07678240db3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: 
\"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.157051 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-scripts\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.157135 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pttp\" (UniqueName: \"kubernetes.io/projected/8b509e35-cec7-4668-8421-07678240db3b-kube-api-access-2pttp\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.157856 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8b509e35-cec7-4668-8421-07678240db3b-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.157974 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-config\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.158948 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b509e35-cec7-4668-8421-07678240db3b-scripts\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.174935 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b509e35-cec7-4668-8421-07678240db3b-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.175398 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.176625 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pttp\" (UniqueName: \"kubernetes.io/projected/8b509e35-cec7-4668-8421-07678240db3b-kube-api-access-2pttp\") pod \"ovn-northd-0\" (UID: \"8b509e35-cec7-4668-8421-07678240db3b\") " pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.202041 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.256126 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.257201 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="dnsmasq-dns" containerID="cri-o://8d511b4993a3f10591543d0cc6acde31758dc2c084d44a92adbb26c541f7a846" gracePeriod=10 Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.290234 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.245:5353: connect: connection refused" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.584911 4909 generic.go:334] "Generic (PLEG): container finished" podID="e0749b84-dc57-428b-98b9-63e98b011b44" containerID="8d511b4993a3f10591543d0cc6acde31758dc2c084d44a92adbb26c541f7a846" exitCode=0 Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.585021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" event={"ID":"e0749b84-dc57-428b-98b9-63e98b011b44","Type":"ContainerDied","Data":"8d511b4993a3f10591543d0cc6acde31758dc2c084d44a92adbb26c541f7a846"} Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.708009 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.770079 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 17:38:53 crc kubenswrapper[4909]: W1128 17:38:53.779028 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b509e35_cec7_4668_8421_07678240db3b.slice/crio-9917687313f1f8f9334e4c5fd44e79e5ab8577526ffcbb8d45dd92232d0b0b2e WatchSource:0}: Error finding container 9917687313f1f8f9334e4c5fd44e79e5ab8577526ffcbb8d45dd92232d0b0b2e: Status 404 returned error can't find the container with id 9917687313f1f8f9334e4c5fd44e79e5ab8577526ffcbb8d45dd92232d0b0b2e Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.872649 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc\") pod \"e0749b84-dc57-428b-98b9-63e98b011b44\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.872744 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config\") pod \"e0749b84-dc57-428b-98b9-63e98b011b44\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.872820 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9mgt\" (UniqueName: \"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt\") pod \"e0749b84-dc57-428b-98b9-63e98b011b44\" (UID: \"e0749b84-dc57-428b-98b9-63e98b011b44\") " Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.878123 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt" (OuterVolumeSpecName: "kube-api-access-s9mgt") pod "e0749b84-dc57-428b-98b9-63e98b011b44" (UID: "e0749b84-dc57-428b-98b9-63e98b011b44"). InnerVolumeSpecName "kube-api-access-s9mgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.915601 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e0749b84-dc57-428b-98b9-63e98b011b44" (UID: "e0749b84-dc57-428b-98b9-63e98b011b44"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.927038 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config" (OuterVolumeSpecName: "config") pod "e0749b84-dc57-428b-98b9-63e98b011b44" (UID: "e0749b84-dc57-428b-98b9-63e98b011b44"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.975339 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.975679 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9mgt\" (UniqueName: \"kubernetes.io/projected/e0749b84-dc57-428b-98b9-63e98b011b44-kube-api-access-s9mgt\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:53 crc kubenswrapper[4909]: I1128 17:38:53.975696 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0749b84-dc57-428b-98b9-63e98b011b44-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.594756 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" event={"ID":"e0749b84-dc57-428b-98b9-63e98b011b44","Type":"ContainerDied","Data":"960650ad02156cd43943f316265095a807b54e848b1d356fb1f5630430598b1a"} Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.594810 4909 scope.go:117] "RemoveContainer" containerID="8d511b4993a3f10591543d0cc6acde31758dc2c084d44a92adbb26c541f7a846" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.594899 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-sgzbw" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.597030 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8b509e35-cec7-4668-8421-07678240db3b","Type":"ContainerStarted","Data":"7a59df784679c3b4838b428896cd14ec291b4f42bbe16ac2e550751aff638453"} Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.597062 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8b509e35-cec7-4668-8421-07678240db3b","Type":"ContainerStarted","Data":"7ab194a718fc2eb8041268cb84d34e5db2e4af341e506e8f9af6dce5c7a1e3ba"} Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.597075 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8b509e35-cec7-4668-8421-07678240db3b","Type":"ContainerStarted","Data":"9917687313f1f8f9334e4c5fd44e79e5ab8577526ffcbb8d45dd92232d0b0b2e"} Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.598236 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.617816 4909 scope.go:117] "RemoveContainer" containerID="e580068aab256004781aed23027abfda0e5933ac9e945ce83090afd45098b176" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.633387 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.633360047 podStartE2EDuration="2.633360047s" podCreationTimestamp="2025-11-28 17:38:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:38:54.618041785 +0000 UTC m=+5317.014726339" watchObservedRunningTime="2025-11-28 17:38:54.633360047 +0000 UTC m=+5317.030044591" Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.653818 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:38:54 crc kubenswrapper[4909]: I1128 17:38:54.662285 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-sgzbw"] Nov 28 17:38:55 crc kubenswrapper[4909]: I1128 17:38:55.910990 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" path="/var/lib/kubelet/pods/e0749b84-dc57-428b-98b9-63e98b011b44/volumes" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.838842 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-wwpvc"] Nov 28 17:38:58 crc kubenswrapper[4909]: E1128 17:38:58.840299 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="init" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.840447 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="init" Nov 28 17:38:58 crc kubenswrapper[4909]: E1128 17:38:58.840569 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="dnsmasq-dns" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.840705 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" containerName="dnsmasq-dns" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.841044 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0749b84-dc57-428b-98b9-63e98b011b44" 
containerName="dnsmasq-dns" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.841695 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.859260 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wwpvc"] Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.937671 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-722f-account-create-update-f7nk9"] Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.939079 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.941021 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.944477 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-722f-account-create-update-f7nk9"] Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.972646 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mwb2\" (UniqueName: \"kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:58 crc kubenswrapper[4909]: I1128 17:38:58.972912 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.074688 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mwb2\" (UniqueName: \"kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.075021 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8czv\" (UniqueName: \"kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.075121 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.075249 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " 
pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.076165 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.093297 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mwb2\" (UniqueName: \"kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2\") pod \"keystone-db-create-wwpvc\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.162593 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wwpvc" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.177024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8czv\" (UniqueName: \"kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.177288 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.177918 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.193241 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8czv\" (UniqueName: \"kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv\") pod \"keystone-722f-account-create-update-f7nk9\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.253224 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.591052 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wwpvc"] Nov 28 17:38:59 crc kubenswrapper[4909]: W1128 17:38:59.597863 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbb1a892_57b8_4458_836f_3f01bc129873.slice/crio-2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2 WatchSource:0}: Error finding container 2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2: Status 404 returned error can't find the container with id 2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2 Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.652187 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wwpvc" event={"ID":"bbb1a892-57b8-4458-836f-3f01bc129873","Type":"ContainerStarted","Data":"2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2"} Nov 28 17:38:59 crc kubenswrapper[4909]: W1128 17:38:59.663675 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod505dcabe_949d_4084_af7c_bb46f054ab68.slice/crio-918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323 WatchSource:0}: Error finding container 918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323: Status 404 returned error can't find the container with id 918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323 Nov 28 17:38:59 crc kubenswrapper[4909]: I1128 17:38:59.666307 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-722f-account-create-update-f7nk9"] Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.664618 4909 generic.go:334] "Generic (PLEG): container finished" podID="505dcabe-949d-4084-af7c-bb46f054ab68" containerID="916775bf3e278027a629e5a16d9c467f2827588e58d64d2ed0fd842354e2a8a9" exitCode=0 Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.664721 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-722f-account-create-update-f7nk9" event={"ID":"505dcabe-949d-4084-af7c-bb46f054ab68","Type":"ContainerDied","Data":"916775bf3e278027a629e5a16d9c467f2827588e58d64d2ed0fd842354e2a8a9"} Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.667690 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-722f-account-create-update-f7nk9" event={"ID":"505dcabe-949d-4084-af7c-bb46f054ab68","Type":"ContainerStarted","Data":"918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323"} Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.670590 4909 generic.go:334] "Generic (PLEG): container finished" podID="bbb1a892-57b8-4458-836f-3f01bc129873" containerID="332f0539b12161a23f9745305082c8f4a398c34867c7ee4c6ff4c0ebde858110" exitCode=0 Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.670641 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wwpvc" event={"ID":"bbb1a892-57b8-4458-836f-3f01bc129873","Type":"ContainerDied","Data":"332f0539b12161a23f9745305082c8f4a398c34867c7ee4c6ff4c0ebde858110"} Nov 28 17:39:00 crc kubenswrapper[4909]: I1128 17:39:00.905296 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:39:00 crc kubenswrapper[4909]: E1128 17:39:00.907861 4909 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.206884 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.211688 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wwpvc" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.335443 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts\") pod \"505dcabe-949d-4084-af7c-bb46f054ab68\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.335510 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8czv\" (UniqueName: \"kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv\") pod \"505dcabe-949d-4084-af7c-bb46f054ab68\" (UID: \"505dcabe-949d-4084-af7c-bb46f054ab68\") " Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.335533 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts\") pod \"bbb1a892-57b8-4458-836f-3f01bc129873\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.335581 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mwb2\" (UniqueName: \"kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2\") pod \"bbb1a892-57b8-4458-836f-3f01bc129873\" (UID: \"bbb1a892-57b8-4458-836f-3f01bc129873\") " Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.336044 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "505dcabe-949d-4084-af7c-bb46f054ab68" (UID: "505dcabe-949d-4084-af7c-bb46f054ab68"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.336393 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bbb1a892-57b8-4458-836f-3f01bc129873" (UID: "bbb1a892-57b8-4458-836f-3f01bc129873"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.336438 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/505dcabe-949d-4084-af7c-bb46f054ab68-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.342963 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2" (OuterVolumeSpecName: "kube-api-access-7mwb2") pod "bbb1a892-57b8-4458-836f-3f01bc129873" (UID: "bbb1a892-57b8-4458-836f-3f01bc129873"). InnerVolumeSpecName "kube-api-access-7mwb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.344077 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv" (OuterVolumeSpecName: "kube-api-access-k8czv") pod "505dcabe-949d-4084-af7c-bb46f054ab68" (UID: "505dcabe-949d-4084-af7c-bb46f054ab68"). InnerVolumeSpecName "kube-api-access-k8czv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.438236 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8czv\" (UniqueName: \"kubernetes.io/projected/505dcabe-949d-4084-af7c-bb46f054ab68-kube-api-access-k8czv\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.438270 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb1a892-57b8-4458-836f-3f01bc129873-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.438280 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mwb2\" (UniqueName: \"kubernetes.io/projected/bbb1a892-57b8-4458-836f-3f01bc129873-kube-api-access-7mwb2\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.692006 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wwpvc" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.692002 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wwpvc" event={"ID":"bbb1a892-57b8-4458-836f-3f01bc129873","Type":"ContainerDied","Data":"2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2"} Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.692411 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ad8ceb060dc5cbaaf5ed1547d0535ad5db785cfc424093e6f1dbac13284cbb2" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.693336 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-722f-account-create-update-f7nk9" event={"ID":"505dcabe-949d-4084-af7c-bb46f054ab68","Type":"ContainerDied","Data":"918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323"} Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.693372 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="918f6730d278bae8a1aa33a4a819b8a925bbc8d4b9c6704335ba14ae3bd5a323" Nov 28 17:39:02 crc kubenswrapper[4909]: I1128 17:39:02.693377 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-722f-account-create-update-f7nk9" Nov 28 17:39:03 crc kubenswrapper[4909]: I1128 17:39:03.305698 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.395231 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-6nvlj"] Nov 28 17:39:04 crc kubenswrapper[4909]: E1128 17:39:04.395666 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbb1a892-57b8-4458-836f-3f01bc129873" containerName="mariadb-database-create" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.395685 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbb1a892-57b8-4458-836f-3f01bc129873" containerName="mariadb-database-create" Nov 28 17:39:04 crc kubenswrapper[4909]: E1128 17:39:04.395739 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="505dcabe-949d-4084-af7c-bb46f054ab68" containerName="mariadb-account-create-update" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.395749 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="505dcabe-949d-4084-af7c-bb46f054ab68" containerName="mariadb-account-create-update" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.395955 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="505dcabe-949d-4084-af7c-bb46f054ab68" containerName="mariadb-account-create-update" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.395976 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbb1a892-57b8-4458-836f-3f01bc129873" containerName="mariadb-database-create" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.396684 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.399845 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.400389 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.400583 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.400856 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-qhgw8" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.405458 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-6nvlj"] Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.486224 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.486289 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nc8c\" (UniqueName: \"kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.486367 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.587570 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.587701 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nc8c\" (UniqueName: \"kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.587806 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.593377 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " 
pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.594292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.608046 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nc8c\" (UniqueName: \"kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c\") pod \"keystone-db-sync-6nvlj\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:04 crc kubenswrapper[4909]: I1128 17:39:04.729599 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:05 crc kubenswrapper[4909]: I1128 17:39:05.193563 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-6nvlj"] Nov 28 17:39:05 crc kubenswrapper[4909]: I1128 17:39:05.727206 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6nvlj" event={"ID":"e5384a5e-a9a1-4beb-a760-4b15d072f465","Type":"ContainerStarted","Data":"5884c2188f4a95ea79c9b961064a62dfa885997f4ee9ec83988a2575a447add5"} Nov 28 17:39:05 crc kubenswrapper[4909]: I1128 17:39:05.727490 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6nvlj" event={"ID":"e5384a5e-a9a1-4beb-a760-4b15d072f465","Type":"ContainerStarted","Data":"b8815cdcbf44fd6e1dd9366bc1eafe3007cf7164e0e4a1db4c633aacd8e2d6da"} Nov 28 17:39:05 crc kubenswrapper[4909]: I1128 17:39:05.758767 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-6nvlj" podStartSLOduration=1.758741055 podStartE2EDuration="1.758741055s" podCreationTimestamp="2025-11-28 17:39:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:39:05.753918336 +0000 UTC m=+5328.150602910" watchObservedRunningTime="2025-11-28 17:39:05.758741055 +0000 UTC m=+5328.155425599" Nov 28 17:39:07 crc kubenswrapper[4909]: I1128 17:39:07.742818 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5384a5e-a9a1-4beb-a760-4b15d072f465" containerID="5884c2188f4a95ea79c9b961064a62dfa885997f4ee9ec83988a2575a447add5" exitCode=0 Nov 28 17:39:07 crc kubenswrapper[4909]: I1128 17:39:07.742909 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6nvlj" event={"ID":"e5384a5e-a9a1-4beb-a760-4b15d072f465","Type":"ContainerDied","Data":"5884c2188f4a95ea79c9b961064a62dfa885997f4ee9ec83988a2575a447add5"} Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.094452 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.284472 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data\") pod \"e5384a5e-a9a1-4beb-a760-4b15d072f465\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.285318 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nc8c\" (UniqueName: \"kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c\") pod \"e5384a5e-a9a1-4beb-a760-4b15d072f465\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.285422 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle\") pod \"e5384a5e-a9a1-4beb-a760-4b15d072f465\" (UID: \"e5384a5e-a9a1-4beb-a760-4b15d072f465\") " Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.292096 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c" (OuterVolumeSpecName: "kube-api-access-7nc8c") pod "e5384a5e-a9a1-4beb-a760-4b15d072f465" (UID: "e5384a5e-a9a1-4beb-a760-4b15d072f465"). InnerVolumeSpecName "kube-api-access-7nc8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.327679 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5384a5e-a9a1-4beb-a760-4b15d072f465" (UID: "e5384a5e-a9a1-4beb-a760-4b15d072f465"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.330229 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data" (OuterVolumeSpecName: "config-data") pod "e5384a5e-a9a1-4beb-a760-4b15d072f465" (UID: "e5384a5e-a9a1-4beb-a760-4b15d072f465"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.387840 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.387881 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nc8c\" (UniqueName: \"kubernetes.io/projected/e5384a5e-a9a1-4beb-a760-4b15d072f465-kube-api-access-7nc8c\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.387894 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5384a5e-a9a1-4beb-a760-4b15d072f465-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.760787 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-6nvlj" event={"ID":"e5384a5e-a9a1-4beb-a760-4b15d072f465","Type":"ContainerDied","Data":"b8815cdcbf44fd6e1dd9366bc1eafe3007cf7164e0e4a1db4c633aacd8e2d6da"} Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.760844 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8815cdcbf44fd6e1dd9366bc1eafe3007cf7164e0e4a1db4c633aacd8e2d6da" Nov 28 17:39:09 crc kubenswrapper[4909]: I1128 17:39:09.760854 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-6nvlj" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.366686 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:39:10 crc kubenswrapper[4909]: E1128 17:39:10.374050 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5384a5e-a9a1-4beb-a760-4b15d072f465" containerName="keystone-db-sync" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.374102 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5384a5e-a9a1-4beb-a760-4b15d072f465" containerName="keystone-db-sync" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.374466 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5384a5e-a9a1-4beb-a760-4b15d072f465" containerName="keystone-db-sync" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.375839 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.377722 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.417573 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-cqrxb"] Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.422845 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.428142 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.428311 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-qhgw8" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.428385 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.428404 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.428635 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.436187 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cqrxb"] Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509239 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509354 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509415 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vs26\" (UniqueName: \"kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509465 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509555 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6ppt\" (UniqueName: \"kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509619 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509635 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509715 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509780 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509831 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.509848 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611506 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611831 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611854 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611871 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611894 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611908 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vs26\" (UniqueName: \"kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611951 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.611997 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6ppt\" (UniqueName: \"kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.612030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.612048 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.612864 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.612897 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.613454 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb\") pod 
\"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.614718 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.621925 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.622092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.622133 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.626095 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.631759 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.633306 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6ppt\" (UniqueName: \"kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt\") pod \"keystone-bootstrap-cqrxb\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.633794 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vs26\" (UniqueName: \"kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26\") pod \"dnsmasq-dns-7bfdbcb9b7-kpnz6\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.698353 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:10 crc kubenswrapper[4909]: I1128 17:39:10.748649 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.272408 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:39:11 crc kubenswrapper[4909]: W1128 17:39:11.280811 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d29a694_47e1_4b41_8325_ca24e9e34c31.slice/crio-b648e286da952c2e2d5921e9714de4b878189b4d07b1baf1f37735596f0dc705 WatchSource:0}: Error finding container b648e286da952c2e2d5921e9714de4b878189b4d07b1baf1f37735596f0dc705: Status 404 returned error can't find the container with id b648e286da952c2e2d5921e9714de4b878189b4d07b1baf1f37735596f0dc705 Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.384552 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cqrxb"] Nov 28 17:39:11 crc kubenswrapper[4909]: W1128 17:39:11.391133 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f3aa07c_1186_41a8_9223_974daf3f1fef.slice/crio-7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d WatchSource:0}: Error finding container 7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d: Status 404 returned error can't find the container with id 7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.777967 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cqrxb" event={"ID":"3f3aa07c-1186-41a8-9223-974daf3f1fef","Type":"ContainerStarted","Data":"5616564468cc67b3105708c4adb7fdef9c93a9b70402c2c83d2e7f88e6f9aafb"} Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.778035 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cqrxb" event={"ID":"3f3aa07c-1186-41a8-9223-974daf3f1fef","Type":"ContainerStarted","Data":"7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d"} Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.781597 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerID="32425aa1cada73baa4a8ea8b30692195a456cd6b7b974f2077d4a5c2b9d2a957" exitCode=0 Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.781636 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" event={"ID":"6d29a694-47e1-4b41-8325-ca24e9e34c31","Type":"ContainerDied","Data":"32425aa1cada73baa4a8ea8b30692195a456cd6b7b974f2077d4a5c2b9d2a957"} Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.781687 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" event={"ID":"6d29a694-47e1-4b41-8325-ca24e9e34c31","Type":"ContainerStarted","Data":"b648e286da952c2e2d5921e9714de4b878189b4d07b1baf1f37735596f0dc705"} Nov 28 17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.805432 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-cqrxb" podStartSLOduration=1.805408508 podStartE2EDuration="1.805408508s" podCreationTimestamp="2025-11-28 17:39:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:39:11.800464666 +0000 UTC m=+5334.197149180" watchObservedRunningTime="2025-11-28 17:39:11.805408508 +0000 UTC m=+5334.202093032" Nov 28 
17:39:11 crc kubenswrapper[4909]: I1128 17:39:11.902056 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:39:11 crc kubenswrapper[4909]: E1128 17:39:11.902290 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:39:12 crc kubenswrapper[4909]: I1128 17:39:12.797877 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" event={"ID":"6d29a694-47e1-4b41-8325-ca24e9e34c31","Type":"ContainerStarted","Data":"d7a80975f39a6e21817f38c3470a438daff6bcaefcd3382b6805a25782f30b03"} Nov 28 17:39:12 crc kubenswrapper[4909]: I1128 17:39:12.798620 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:12 crc kubenswrapper[4909]: I1128 17:39:12.834437 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" podStartSLOduration=2.8343759950000003 podStartE2EDuration="2.834375995s" podCreationTimestamp="2025-11-28 17:39:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:39:12.827227113 +0000 UTC m=+5335.223911667" watchObservedRunningTime="2025-11-28 17:39:12.834375995 +0000 UTC m=+5335.231060559" Nov 28 17:39:14 crc kubenswrapper[4909]: I1128 17:39:14.814031 4909 generic.go:334] "Generic (PLEG): container finished" podID="3f3aa07c-1186-41a8-9223-974daf3f1fef" containerID="5616564468cc67b3105708c4adb7fdef9c93a9b70402c2c83d2e7f88e6f9aafb" exitCode=0 Nov 28 17:39:14 crc kubenswrapper[4909]: I1128 17:39:14.814086 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cqrxb" event={"ID":"3f3aa07c-1186-41a8-9223-974daf3f1fef","Type":"ContainerDied","Data":"5616564468cc67b3105708c4adb7fdef9c93a9b70402c2c83d2e7f88e6f9aafb"} Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.218103 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317457 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317588 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317636 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317685 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.317758 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6ppt\" (UniqueName: \"kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt\") pod \"3f3aa07c-1186-41a8-9223-974daf3f1fef\" (UID: \"3f3aa07c-1186-41a8-9223-974daf3f1fef\") " Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.323096 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.324848 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts" (OuterVolumeSpecName: "scripts") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.326871 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt" (OuterVolumeSpecName: "kube-api-access-b6ppt") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "kube-api-access-b6ppt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.331314 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.345387 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.359157 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data" (OuterVolumeSpecName: "config-data") pod "3f3aa07c-1186-41a8-9223-974daf3f1fef" (UID: "3f3aa07c-1186-41a8-9223-974daf3f1fef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419401 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419442 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419456 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419468 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419479 4909 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3f3aa07c-1186-41a8-9223-974daf3f1fef-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.419490 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6ppt\" (UniqueName: \"kubernetes.io/projected/3f3aa07c-1186-41a8-9223-974daf3f1fef-kube-api-access-b6ppt\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.841181 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cqrxb" event={"ID":"3f3aa07c-1186-41a8-9223-974daf3f1fef","Type":"ContainerDied","Data":"7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d"} Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.841240 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ecd6193b72168648b5243dc69169fc5870fafb9fdee06804f072aacc040e45d" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.841265 4909 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-cqrxb" Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.950269 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-cqrxb"] Nov 28 17:39:16 crc kubenswrapper[4909]: I1128 17:39:16.956572 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-cqrxb"] Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.005122 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l7db9"] Nov 28 17:39:17 crc kubenswrapper[4909]: E1128 17:39:17.005690 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3aa07c-1186-41a8-9223-974daf3f1fef" containerName="keystone-bootstrap" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.005710 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3aa07c-1186-41a8-9223-974daf3f1fef" containerName="keystone-bootstrap" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.005916 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f3aa07c-1186-41a8-9223-974daf3f1fef" containerName="keystone-bootstrap" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.006483 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.008637 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.009080 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.009323 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.009580 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.009820 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-qhgw8" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.021095 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l7db9"] Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.130815 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.130946 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.131208 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 
17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.131243 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.131293 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.131541 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx8xb\" (UniqueName: \"kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.232957 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.233071 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.233101 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.233132 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.233189 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx8xb\" (UniqueName: \"kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.233230 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.240187 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.249151 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.249767 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.249879 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.250031 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.255374 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx8xb\" (UniqueName: \"kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb\") pod \"keystone-bootstrap-l7db9\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.362122 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.845393 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l7db9"] Nov 28 17:39:17 crc kubenswrapper[4909]: I1128 17:39:17.932458 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f3aa07c-1186-41a8-9223-974daf3f1fef" path="/var/lib/kubelet/pods/3f3aa07c-1186-41a8-9223-974daf3f1fef/volumes" Nov 28 17:39:18 crc kubenswrapper[4909]: I1128 17:39:18.869432 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l7db9" event={"ID":"473b34a3-d6d9-4f27-946c-707ffe7641ed","Type":"ContainerStarted","Data":"af8c707d13707ef6a2121caee72f7e2db770c9c44031bdfefc8e522dc19f843b"} Nov 28 17:39:18 crc kubenswrapper[4909]: I1128 17:39:18.869717 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l7db9" event={"ID":"473b34a3-d6d9-4f27-946c-707ffe7641ed","Type":"ContainerStarted","Data":"3bd18d76a4024129f0d5c8d678f935c0b7c4d015f9d23d6bb01281d6322842ea"} Nov 28 17:39:18 crc kubenswrapper[4909]: I1128 17:39:18.907744 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l7db9" podStartSLOduration=2.907726525 podStartE2EDuration="2.907726525s" podCreationTimestamp="2025-11-28 17:39:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:39:18.901019405 +0000 UTC m=+5341.297703969" watchObservedRunningTime="2025-11-28 17:39:18.907726525 +0000 UTC m=+5341.304411079" Nov 28 17:39:20 crc kubenswrapper[4909]: I1128 17:39:20.700199 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:39:20 crc kubenswrapper[4909]: I1128 17:39:20.822186 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:39:20 crc kubenswrapper[4909]: I1128 17:39:20.822529 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="dnsmasq-dns" containerID="cri-o://6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42" gracePeriod=10 Nov 28 17:39:20 crc kubenswrapper[4909]: I1128 17:39:20.890688 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l7db9" event={"ID":"473b34a3-d6d9-4f27-946c-707ffe7641ed","Type":"ContainerDied","Data":"af8c707d13707ef6a2121caee72f7e2db770c9c44031bdfefc8e522dc19f843b"} Nov 28 17:39:20 crc kubenswrapper[4909]: I1128 17:39:20.890753 4909 generic.go:334] "Generic (PLEG): container finished" podID="473b34a3-d6d9-4f27-946c-707ffe7641ed" containerID="af8c707d13707ef6a2121caee72f7e2db770c9c44031bdfefc8e522dc19f843b" exitCode=0 Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.282238 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.426095 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7bvt\" (UniqueName: \"kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt\") pod \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.426391 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config\") pod \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.426483 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb\") pod \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.426605 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc\") pod \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.426710 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb\") pod \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\" (UID: \"78ed4655-73ba-4c46-9d91-8e0f4dfbe258\") " Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.433136 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt" (OuterVolumeSpecName: "kube-api-access-m7bvt") pod "78ed4655-73ba-4c46-9d91-8e0f4dfbe258" (UID: "78ed4655-73ba-4c46-9d91-8e0f4dfbe258"). InnerVolumeSpecName "kube-api-access-m7bvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.463612 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "78ed4655-73ba-4c46-9d91-8e0f4dfbe258" (UID: "78ed4655-73ba-4c46-9d91-8e0f4dfbe258"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.465084 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "78ed4655-73ba-4c46-9d91-8e0f4dfbe258" (UID: "78ed4655-73ba-4c46-9d91-8e0f4dfbe258"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.468295 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config" (OuterVolumeSpecName: "config") pod "78ed4655-73ba-4c46-9d91-8e0f4dfbe258" (UID: "78ed4655-73ba-4c46-9d91-8e0f4dfbe258"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.469497 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "78ed4655-73ba-4c46-9d91-8e0f4dfbe258" (UID: "78ed4655-73ba-4c46-9d91-8e0f4dfbe258"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.528345 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.528379 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.528390 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7bvt\" (UniqueName: \"kubernetes.io/projected/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-kube-api-access-m7bvt\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.528399 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.528409 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78ed4655-73ba-4c46-9d91-8e0f4dfbe258-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.901960 4909 generic.go:334] "Generic (PLEG): container finished" podID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerID="6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42" exitCode=0 Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.902171 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.924571 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" event={"ID":"78ed4655-73ba-4c46-9d91-8e0f4dfbe258","Type":"ContainerDied","Data":"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42"} Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.924619 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb899f49f-86csq" event={"ID":"78ed4655-73ba-4c46-9d91-8e0f4dfbe258","Type":"ContainerDied","Data":"eca675926d33382d1769be77fea9d6b9634cdb1c549f3a55a09c3fa6a531eaa6"} Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.924640 4909 scope.go:117] "RemoveContainer" containerID="6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.957186 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.963508 4909 scope.go:117] "RemoveContainer" containerID="a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8" Nov 28 17:39:21 crc kubenswrapper[4909]: I1128 17:39:21.971317 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb899f49f-86csq"] Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.001071 4909 scope.go:117] "RemoveContainer" containerID="6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42" Nov 28 17:39:22 crc kubenswrapper[4909]: E1128 17:39:22.001615 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42\": container with ID starting with 6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42 not found: ID does not exist" containerID="6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.001678 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42"} err="failed to get container status \"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42\": rpc error: code = NotFound desc = could not find container \"6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42\": container with ID starting with 6392edefb565a7c99c3228befdaca83d5ed4ebfde8e3e7a9c4241ab4e6714d42 not found: ID does not exist" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.001709 4909 scope.go:117] "RemoveContainer" containerID="a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8" Nov 28 17:39:22 crc kubenswrapper[4909]: E1128 17:39:22.001992 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8\": container with ID starting with a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8 not found: ID does not exist" containerID="a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.002028 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8"} err="failed to get container status 
\"a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8\": rpc error: code = NotFound desc = could not find container \"a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8\": container with ID starting with a55bf7c324d79cd457920d599fe502dae9d25617ae7cd2ee5a8af6353ec8fcb8 not found: ID does not exist" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.271601 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343280 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx8xb\" (UniqueName: \"kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343435 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343471 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343494 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.343567 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys\") pod \"473b34a3-d6d9-4f27-946c-707ffe7641ed\" (UID: \"473b34a3-d6d9-4f27-946c-707ffe7641ed\") " Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.348994 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb" (OuterVolumeSpecName: "kube-api-access-xx8xb") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "kube-api-access-xx8xb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.349792 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.349855 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts" (OuterVolumeSpecName: "scripts") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.356816 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.364226 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data" (OuterVolumeSpecName: "config-data") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.379964 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "473b34a3-d6d9-4f27-946c-707ffe7641ed" (UID: "473b34a3-d6d9-4f27-946c-707ffe7641ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445528 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445556 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx8xb\" (UniqueName: \"kubernetes.io/projected/473b34a3-d6d9-4f27-946c-707ffe7641ed-kube-api-access-xx8xb\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445567 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445575 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445582 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.445591 4909 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/473b34a3-d6d9-4f27-946c-707ffe7641ed-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.902296 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" 
Nov 28 17:39:22 crc kubenswrapper[4909]: E1128 17:39:22.903614 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.926536 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l7db9" event={"ID":"473b34a3-d6d9-4f27-946c-707ffe7641ed","Type":"ContainerDied","Data":"3bd18d76a4024129f0d5c8d678f935c0b7c4d015f9d23d6bb01281d6322842ea"} Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.926579 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bd18d76a4024129f0d5c8d678f935c0b7c4d015f9d23d6bb01281d6322842ea" Nov 28 17:39:22 crc kubenswrapper[4909]: I1128 17:39:22.926630 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l7db9" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.008689 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-755754cf9d-jvrhc"] Nov 28 17:39:23 crc kubenswrapper[4909]: E1128 17:39:23.009068 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="473b34a3-d6d9-4f27-946c-707ffe7641ed" containerName="keystone-bootstrap" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.009092 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="473b34a3-d6d9-4f27-946c-707ffe7641ed" containerName="keystone-bootstrap" Nov 28 17:39:23 crc kubenswrapper[4909]: E1128 17:39:23.009111 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="init" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.009120 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="init" Nov 28 17:39:23 crc kubenswrapper[4909]: E1128 17:39:23.009142 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="dnsmasq-dns" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.009151 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="dnsmasq-dns" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.009325 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="473b34a3-d6d9-4f27-946c-707ffe7641ed" containerName="keystone-bootstrap" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.009342 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" containerName="dnsmasq-dns" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.010013 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.012055 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.012600 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.013448 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.013734 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-qhgw8" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.026301 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-755754cf9d-jvrhc"] Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.157680 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-combined-ca-bundle\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.157752 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-scripts\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.157811 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hlrz\" (UniqueName: \"kubernetes.io/projected/206f6377-a9e3-41f9-899c-fea7f35ecd51-kube-api-access-6hlrz\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.158027 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-config-data\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.158189 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-fernet-keys\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.158223 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-credential-keys\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259450 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-fernet-keys\") pod 
\"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259493 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-credential-keys\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259545 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-combined-ca-bundle\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259570 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-scripts\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259603 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hlrz\" (UniqueName: \"kubernetes.io/projected/206f6377-a9e3-41f9-899c-fea7f35ecd51-kube-api-access-6hlrz\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.259637 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-config-data\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.270563 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-credential-keys\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.270780 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-combined-ca-bundle\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.271162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-scripts\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.271223 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-config-data\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 
17:39:23.276328 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/206f6377-a9e3-41f9-899c-fea7f35ecd51-fernet-keys\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.278777 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hlrz\" (UniqueName: \"kubernetes.io/projected/206f6377-a9e3-41f9-899c-fea7f35ecd51-kube-api-access-6hlrz\") pod \"keystone-755754cf9d-jvrhc\" (UID: \"206f6377-a9e3-41f9-899c-fea7f35ecd51\") " pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.336338 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.769526 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-755754cf9d-jvrhc"] Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.913252 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78ed4655-73ba-4c46-9d91-8e0f4dfbe258" path="/var/lib/kubelet/pods/78ed4655-73ba-4c46-9d91-8e0f4dfbe258/volumes" Nov 28 17:39:23 crc kubenswrapper[4909]: I1128 17:39:23.935928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-755754cf9d-jvrhc" event={"ID":"206f6377-a9e3-41f9-899c-fea7f35ecd51","Type":"ContainerStarted","Data":"0c69985241d00783fab1ce37ce49161f1d827ec4e2e8bb3896456759460c085e"} Nov 28 17:39:24 crc kubenswrapper[4909]: I1128 17:39:24.945173 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-755754cf9d-jvrhc" event={"ID":"206f6377-a9e3-41f9-899c-fea7f35ecd51","Type":"ContainerStarted","Data":"91c1bd498151ea7053588e5b95e1cbae67fbd119282e763fe22b010f0e7b745b"} Nov 28 17:39:24 crc kubenswrapper[4909]: I1128 17:39:24.945469 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:25 crc kubenswrapper[4909]: I1128 17:39:25.184481 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-755754cf9d-jvrhc" podStartSLOduration=3.184462237 podStartE2EDuration="3.184462237s" podCreationTimestamp="2025-11-28 17:39:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:39:25.176627537 +0000 UTC m=+5347.573312061" watchObservedRunningTime="2025-11-28 17:39:25.184462237 +0000 UTC m=+5347.581146771" Nov 28 17:39:31 crc kubenswrapper[4909]: I1128 17:39:31.698651 4909 scope.go:117] "RemoveContainer" containerID="903fba95ed66be364f6b4ee879aaf96bb6febe6761606ebeb02be07ae1f4cc3a" Nov 28 17:39:31 crc kubenswrapper[4909]: I1128 17:39:31.718944 4909 scope.go:117] "RemoveContainer" containerID="d3b50455012716c9385a8d6351e92cc47f666891ab77ed6d7e6d6b53effce566" Nov 28 17:39:31 crc kubenswrapper[4909]: I1128 17:39:31.767475 4909 scope.go:117] "RemoveContainer" containerID="9c2c726a1de00c20f5b3320df66b7cedefafc47829a8303ca229255aadde65cc" Nov 28 17:39:31 crc kubenswrapper[4909]: I1128 17:39:31.800746 4909 scope.go:117] "RemoveContainer" containerID="020bd65f619f7a7af9bc14dd491d2ead894c168643875fc7bda136597a78127c" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.671944 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 
17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.674249 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.686014 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.854025 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.854087 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.854471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-472lt\" (UniqueName: \"kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.956455 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-472lt\" (UniqueName: \"kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.956559 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.956584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.957113 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: I1128 17:39:34.957320 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:34 crc kubenswrapper[4909]: 
I1128 17:39:34.985002 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-472lt\" (UniqueName: \"kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt\") pod \"redhat-operators-lv7qg\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:35 crc kubenswrapper[4909]: I1128 17:39:35.001619 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:35 crc kubenswrapper[4909]: I1128 17:39:35.429906 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 17:39:36 crc kubenswrapper[4909]: I1128 17:39:36.062109 4909 generic.go:334] "Generic (PLEG): container finished" podID="c88f6474-d3ee-495a-8bcc-21b322717343" containerID="df2a92aa8790034c9227efe94d1c8c858ea702356f4e6b99080e590200348c78" exitCode=0 Nov 28 17:39:36 crc kubenswrapper[4909]: I1128 17:39:36.062328 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerDied","Data":"df2a92aa8790034c9227efe94d1c8c858ea702356f4e6b99080e590200348c78"} Nov 28 17:39:36 crc kubenswrapper[4909]: I1128 17:39:36.062403 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerStarted","Data":"d2b89f8b5f8de2c5c2099332905c7ab01baecfe5b6c2cbcf345a12921471df0a"} Nov 28 17:39:36 crc kubenswrapper[4909]: I1128 17:39:36.901232 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:39:36 crc kubenswrapper[4909]: E1128 17:39:36.901761 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:39:37 crc kubenswrapper[4909]: I1128 17:39:37.072617 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerStarted","Data":"37cd8799f204b19d99130fde7a6f7aa8c2e1b2db4c599de48e9a88a4a466d309"} Nov 28 17:39:38 crc kubenswrapper[4909]: I1128 17:39:38.086875 4909 generic.go:334] "Generic (PLEG): container finished" podID="c88f6474-d3ee-495a-8bcc-21b322717343" containerID="37cd8799f204b19d99130fde7a6f7aa8c2e1b2db4c599de48e9a88a4a466d309" exitCode=0 Nov 28 17:39:38 crc kubenswrapper[4909]: I1128 17:39:38.086932 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerDied","Data":"37cd8799f204b19d99130fde7a6f7aa8c2e1b2db4c599de48e9a88a4a466d309"} Nov 28 17:39:39 crc kubenswrapper[4909]: I1128 17:39:39.096771 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerStarted","Data":"b24ea3ea3bfea69baa3de8bfaeeec125ed570ae5f44a77da4007fd23d30256a1"} Nov 28 17:39:39 crc kubenswrapper[4909]: I1128 
17:39:39.115525 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lv7qg" podStartSLOduration=2.685488306 podStartE2EDuration="5.115474042s" podCreationTimestamp="2025-11-28 17:39:34 +0000 UTC" firstStartedPulling="2025-11-28 17:39:36.064717953 +0000 UTC m=+5358.461402477" lastFinishedPulling="2025-11-28 17:39:38.494703689 +0000 UTC m=+5360.891388213" observedRunningTime="2025-11-28 17:39:39.111932397 +0000 UTC m=+5361.508616931" watchObservedRunningTime="2025-11-28 17:39:39.115474042 +0000 UTC m=+5361.512158566" Nov 28 17:39:45 crc kubenswrapper[4909]: I1128 17:39:45.002556 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:45 crc kubenswrapper[4909]: I1128 17:39:45.003196 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:45 crc kubenswrapper[4909]: I1128 17:39:45.064465 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:45 crc kubenswrapper[4909]: I1128 17:39:45.188821 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:45 crc kubenswrapper[4909]: I1128 17:39:45.299496 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 17:39:47 crc kubenswrapper[4909]: I1128 17:39:47.165357 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lv7qg" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="registry-server" containerID="cri-o://b24ea3ea3bfea69baa3de8bfaeeec125ed570ae5f44a77da4007fd23d30256a1" gracePeriod=2 Nov 28 17:39:47 crc kubenswrapper[4909]: I1128 17:39:47.911712 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:39:47 crc kubenswrapper[4909]: E1128 17:39:47.912407 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.196094 4909 generic.go:334] "Generic (PLEG): container finished" podID="c88f6474-d3ee-495a-8bcc-21b322717343" containerID="b24ea3ea3bfea69baa3de8bfaeeec125ed570ae5f44a77da4007fd23d30256a1" exitCode=0 Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.196220 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerDied","Data":"b24ea3ea3bfea69baa3de8bfaeeec125ed570ae5f44a77da4007fd23d30256a1"} Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.336838 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.514347 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-472lt\" (UniqueName: \"kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt\") pod \"c88f6474-d3ee-495a-8bcc-21b322717343\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.514493 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content\") pod \"c88f6474-d3ee-495a-8bcc-21b322717343\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.514550 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities\") pod \"c88f6474-d3ee-495a-8bcc-21b322717343\" (UID: \"c88f6474-d3ee-495a-8bcc-21b322717343\") " Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.515906 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities" (OuterVolumeSpecName: "utilities") pod "c88f6474-d3ee-495a-8bcc-21b322717343" (UID: "c88f6474-d3ee-495a-8bcc-21b322717343"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.521353 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt" (OuterVolumeSpecName: "kube-api-access-472lt") pod "c88f6474-d3ee-495a-8bcc-21b322717343" (UID: "c88f6474-d3ee-495a-8bcc-21b322717343"). InnerVolumeSpecName "kube-api-access-472lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.616534 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.616892 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-472lt\" (UniqueName: \"kubernetes.io/projected/c88f6474-d3ee-495a-8bcc-21b322717343-kube-api-access-472lt\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.628831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c88f6474-d3ee-495a-8bcc-21b322717343" (UID: "c88f6474-d3ee-495a-8bcc-21b322717343"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:39:50 crc kubenswrapper[4909]: I1128 17:39:50.719426 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88f6474-d3ee-495a-8bcc-21b322717343-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.212193 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lv7qg" event={"ID":"c88f6474-d3ee-495a-8bcc-21b322717343","Type":"ContainerDied","Data":"d2b89f8b5f8de2c5c2099332905c7ab01baecfe5b6c2cbcf345a12921471df0a"} Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.212277 4909 scope.go:117] "RemoveContainer" containerID="b24ea3ea3bfea69baa3de8bfaeeec125ed570ae5f44a77da4007fd23d30256a1" Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.212284 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lv7qg" Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.257890 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.260487 4909 scope.go:117] "RemoveContainer" containerID="37cd8799f204b19d99130fde7a6f7aa8c2e1b2db4c599de48e9a88a4a466d309" Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.266775 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lv7qg"] Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.307864 4909 scope.go:117] "RemoveContainer" containerID="df2a92aa8790034c9227efe94d1c8c858ea702356f4e6b99080e590200348c78" Nov 28 17:39:51 crc kubenswrapper[4909]: I1128 17:39:51.923192 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" path="/var/lib/kubelet/pods/c88f6474-d3ee-495a-8bcc-21b322717343/volumes" Nov 28 17:39:54 crc kubenswrapper[4909]: I1128 17:39:54.681771 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-755754cf9d-jvrhc" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.884047 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 17:39:58 crc kubenswrapper[4909]: E1128 17:39:58.887911 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="extract-utilities" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.887965 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="extract-utilities" Nov 28 17:39:58 crc kubenswrapper[4909]: E1128 17:39:58.888014 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="extract-content" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.888032 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="extract-content" Nov 28 17:39:58 crc kubenswrapper[4909]: E1128 17:39:58.888092 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="registry-server" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.888113 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="registry-server" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.888435 
4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c88f6474-d3ee-495a-8bcc-21b322717343" containerName="registry-server" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.889456 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.893407 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.903158 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4vpgz" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.903158 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.907919 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.981246 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kftc4\" (UniqueName: \"kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.981310 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:58 crc kubenswrapper[4909]: I1128 17:39:58.981359 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.082417 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kftc4\" (UniqueName: \"kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.082511 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.082597 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.084204 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config\") pod 
\"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.089558 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.107423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kftc4\" (UniqueName: \"kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4\") pod \"openstackclient\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.225711 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 17:39:59 crc kubenswrapper[4909]: I1128 17:39:59.726169 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 17:40:00 crc kubenswrapper[4909]: I1128 17:40:00.304358 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"917f1c58-6e8e-4503-b697-d69434d8622c","Type":"ContainerStarted","Data":"702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e"} Nov 28 17:40:00 crc kubenswrapper[4909]: I1128 17:40:00.304758 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"917f1c58-6e8e-4503-b697-d69434d8622c","Type":"ContainerStarted","Data":"1c7a8c9186cf735f903a1fb880d8bdfc2aba875870c5515eb98bd315dcff8d66"} Nov 28 17:40:00 crc kubenswrapper[4909]: I1128 17:40:00.333820 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.333801791 podStartE2EDuration="2.333801791s" podCreationTimestamp="2025-11-28 17:39:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:40:00.321598963 +0000 UTC m=+5382.718283487" watchObservedRunningTime="2025-11-28 17:40:00.333801791 +0000 UTC m=+5382.730486315" Nov 28 17:40:00 crc kubenswrapper[4909]: I1128 17:40:00.901902 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:40:00 crc kubenswrapper[4909]: E1128 17:40:00.902139 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:40:14 crc kubenswrapper[4909]: I1128 17:40:14.902617 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:40:14 crc kubenswrapper[4909]: E1128 17:40:14.904204 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:40:27 crc kubenswrapper[4909]: I1128 17:40:27.907183 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:40:27 crc kubenswrapper[4909]: E1128 17:40:27.907826 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:40:40 crc kubenswrapper[4909]: I1128 17:40:40.901581 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:40:40 crc kubenswrapper[4909]: E1128 17:40:40.902729 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.726188 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.728601 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.746779 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.857028 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl9j5\" (UniqueName: \"kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.857282 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.857406 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.961212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities\") pod \"certified-operators-nrf6x\" (UID: 
\"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.961360 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.961564 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl9j5\" (UniqueName: \"kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.961807 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.961840 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:42 crc kubenswrapper[4909]: I1128 17:40:42.985280 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl9j5\" (UniqueName: \"kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5\") pod \"certified-operators-nrf6x\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:43 crc kubenswrapper[4909]: I1128 17:40:43.053283 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:43 crc kubenswrapper[4909]: I1128 17:40:43.637344 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:43 crc kubenswrapper[4909]: I1128 17:40:43.741156 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerStarted","Data":"96467572296a96c4c80a176e5a51f155560673f1951af72ce9f4c40163288d9e"} Nov 28 17:40:44 crc kubenswrapper[4909]: I1128 17:40:44.758058 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerID="d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364" exitCode=0 Nov 28 17:40:44 crc kubenswrapper[4909]: I1128 17:40:44.758335 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerDied","Data":"d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364"} Nov 28 17:40:44 crc kubenswrapper[4909]: I1128 17:40:44.761161 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:40:46 crc kubenswrapper[4909]: I1128 17:40:46.782415 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerID="01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5" exitCode=0 Nov 28 17:40:46 crc kubenswrapper[4909]: I1128 17:40:46.782495 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerDied","Data":"01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5"} Nov 28 17:40:48 crc kubenswrapper[4909]: I1128 17:40:48.805928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerStarted","Data":"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692"} Nov 28 17:40:48 crc kubenswrapper[4909]: I1128 17:40:48.843395 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nrf6x" podStartSLOduration=3.8631571879999997 podStartE2EDuration="6.843377161s" podCreationTimestamp="2025-11-28 17:40:42 +0000 UTC" firstStartedPulling="2025-11-28 17:40:44.760924033 +0000 UTC m=+5427.157608567" lastFinishedPulling="2025-11-28 17:40:47.741143986 +0000 UTC m=+5430.137828540" observedRunningTime="2025-11-28 17:40:48.832783306 +0000 UTC m=+5431.229467850" watchObservedRunningTime="2025-11-28 17:40:48.843377161 +0000 UTC m=+5431.240061675" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.054339 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.055853 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.130536 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.901355 4909 scope.go:117] "RemoveContainer" 
containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:40:53 crc kubenswrapper[4909]: E1128 17:40:53.901692 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.914257 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:53 crc kubenswrapper[4909]: I1128 17:40:53.976242 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:55 crc kubenswrapper[4909]: I1128 17:40:55.864637 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nrf6x" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="registry-server" containerID="cri-o://9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692" gracePeriod=2 Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.355136 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.528207 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities\") pod \"e5af103d-3875-440b-b729-2bb0ab80e53a\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.529068 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities" (OuterVolumeSpecName: "utilities") pod "e5af103d-3875-440b-b729-2bb0ab80e53a" (UID: "e5af103d-3875-440b-b729-2bb0ab80e53a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.530019 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content\") pod \"e5af103d-3875-440b-b729-2bb0ab80e53a\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.538158 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl9j5\" (UniqueName: \"kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5\") pod \"e5af103d-3875-440b-b729-2bb0ab80e53a\" (UID: \"e5af103d-3875-440b-b729-2bb0ab80e53a\") " Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.538885 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.546601 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5" (OuterVolumeSpecName: "kube-api-access-wl9j5") pod "e5af103d-3875-440b-b729-2bb0ab80e53a" (UID: "e5af103d-3875-440b-b729-2bb0ab80e53a"). InnerVolumeSpecName "kube-api-access-wl9j5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.582530 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5af103d-3875-440b-b729-2bb0ab80e53a" (UID: "e5af103d-3875-440b-b729-2bb0ab80e53a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.640982 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5af103d-3875-440b-b729-2bb0ab80e53a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.641029 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl9j5\" (UniqueName: \"kubernetes.io/projected/e5af103d-3875-440b-b729-2bb0ab80e53a-kube-api-access-wl9j5\") on node \"crc\" DevicePath \"\"" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.875998 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerID="9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692" exitCode=0 Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.876061 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerDied","Data":"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692"} Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.876113 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nrf6x" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.876151 4909 scope.go:117] "RemoveContainer" containerID="9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.876100 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nrf6x" event={"ID":"e5af103d-3875-440b-b729-2bb0ab80e53a","Type":"ContainerDied","Data":"96467572296a96c4c80a176e5a51f155560673f1951af72ce9f4c40163288d9e"} Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.919292 4909 scope.go:117] "RemoveContainer" containerID="01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5" Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.926061 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.934949 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nrf6x"] Nov 28 17:40:56 crc kubenswrapper[4909]: I1128 17:40:56.951135 4909 scope.go:117] "RemoveContainer" containerID="d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.005626 4909 scope.go:117] "RemoveContainer" containerID="9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692" Nov 28 17:40:57 crc kubenswrapper[4909]: E1128 17:40:57.006905 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692\": container with ID starting with 9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692 not found: ID does not exist" containerID="9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.006977 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692"} err="failed to get container status \"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692\": rpc error: code = NotFound desc = could not find container \"9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692\": container with ID starting with 9d4a0a0f9dbbf007032561990a47de98f6f8f48bb60d9f1641fba084fae38692 not found: ID does not exist" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.007019 4909 scope.go:117] "RemoveContainer" containerID="01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5" Nov 28 17:40:57 crc kubenswrapper[4909]: E1128 17:40:57.007568 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5\": container with ID starting with 01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5 not found: ID does not exist" containerID="01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.007614 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5"} err="failed to get container status \"01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5\": rpc error: code = NotFound desc = could not find 
container \"01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5\": container with ID starting with 01159d70f090bdb1e8c75633698f6a097fdb0667b962026aa89cb881221779e5 not found: ID does not exist" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.007645 4909 scope.go:117] "RemoveContainer" containerID="d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364" Nov 28 17:40:57 crc kubenswrapper[4909]: E1128 17:40:57.008188 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364\": container with ID starting with d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364 not found: ID does not exist" containerID="d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.008266 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364"} err="failed to get container status \"d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364\": rpc error: code = NotFound desc = could not find container \"d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364\": container with ID starting with d6a17f99bafb03958e7658b855c143a18c4aa64872a3e6faa30deb36c8281364 not found: ID does not exist" Nov 28 17:40:57 crc kubenswrapper[4909]: I1128 17:40:57.918235 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" path="/var/lib/kubelet/pods/e5af103d-3875-440b-b729-2bb0ab80e53a/volumes" Nov 28 17:41:08 crc kubenswrapper[4909]: I1128 17:41:08.904551 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:41:08 crc kubenswrapper[4909]: E1128 17:41:08.905398 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:41:20 crc kubenswrapper[4909]: I1128 17:41:20.902170 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:41:20 crc kubenswrapper[4909]: E1128 17:41:20.903341 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:41:34 crc kubenswrapper[4909]: I1128 17:41:34.901824 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:41:34 crc kubenswrapper[4909]: E1128 17:41:34.902516 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.475795 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-gvx6t"]
Nov 28 17:41:37 crc kubenswrapper[4909]: E1128 17:41:37.476695 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="extract-utilities"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.476721 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="extract-utilities"
Nov 28 17:41:37 crc kubenswrapper[4909]: E1128 17:41:37.476760 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="registry-server"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.476772 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="registry-server"
Nov 28 17:41:37 crc kubenswrapper[4909]: E1128 17:41:37.476798 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="extract-content"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.476810 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="extract-content"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.477097 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5af103d-3875-440b-b729-2bb0ab80e53a" containerName="registry-server"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.478013 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.482209 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-5a74-account-create-update-69kzf"]
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.483316 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.494926 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5a74-account-create-update-69kzf"]
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.495305 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.505623 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-gvx6t"]
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.565885 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.566245 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4qvw\" (UniqueName: \"kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.566343 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnqtd\" (UniqueName: \"kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.566461 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.667843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.667976 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.668056 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4qvw\" (UniqueName: \"kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.668101 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnqtd\" (UniqueName: \"kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.669060 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.669585 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.688367 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnqtd\" (UniqueName: \"kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd\") pod \"barbican-db-create-gvx6t\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") " pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.691459 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4qvw\" (UniqueName: \"kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw\") pod \"barbican-5a74-account-create-update-69kzf\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") " pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.810530 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:37 crc kubenswrapper[4909]: I1128 17:41:37.833317 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:38 crc kubenswrapper[4909]: I1128 17:41:38.290229 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-gvx6t"]
Nov 28 17:41:38 crc kubenswrapper[4909]: I1128 17:41:38.388948 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5a74-account-create-update-69kzf"]
Nov 28 17:41:38 crc kubenswrapper[4909]: W1128 17:41:38.395334 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4478262b_7ded_47d1_91ac_4957886c7628.slice/crio-06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302 WatchSource:0}: Error finding container 06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302: Status 404 returned error can't find the container with id 06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.293428 4909 generic.go:334] "Generic (PLEG): container finished" podID="3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" containerID="103ea4de5ee4e81fc593501bacd6154237ed3912f0c3c5e1d3301f7ad9bc415e" exitCode=0
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.293545 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gvx6t" event={"ID":"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41","Type":"ContainerDied","Data":"103ea4de5ee4e81fc593501bacd6154237ed3912f0c3c5e1d3301f7ad9bc415e"}
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.293681 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gvx6t" event={"ID":"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41","Type":"ContainerStarted","Data":"b9c9dff23c987672e870cce2cf7478ce6571c47dada6de43ac27c123706fc6eb"}
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.299264 4909 generic.go:334] "Generic (PLEG): container finished" podID="4478262b-7ded-47d1-91ac-4957886c7628" containerID="dc6675478ee391625360b48caf9c7bc81074f2db2c0d62631302a25656382e7d" exitCode=0
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.299340 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a74-account-create-update-69kzf" event={"ID":"4478262b-7ded-47d1-91ac-4957886c7628","Type":"ContainerDied","Data":"dc6675478ee391625360b48caf9c7bc81074f2db2c0d62631302a25656382e7d"}
Nov 28 17:41:39 crc kubenswrapper[4909]: I1128 17:41:39.299383 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a74-account-create-update-69kzf" event={"ID":"4478262b-7ded-47d1-91ac-4957886c7628","Type":"ContainerStarted","Data":"06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302"}
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.700781 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gvx6t"
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.706628 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.827070 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts\") pod \"4478262b-7ded-47d1-91ac-4957886c7628\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") "
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.827277 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts\") pod \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") "
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.827455 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnqtd\" (UniqueName: \"kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd\") pod \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\" (UID: \"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41\") "
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.827721 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4qvw\" (UniqueName: \"kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw\") pod \"4478262b-7ded-47d1-91ac-4957886c7628\" (UID: \"4478262b-7ded-47d1-91ac-4957886c7628\") "
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.827973 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" (UID: "3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.828008 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4478262b-7ded-47d1-91ac-4957886c7628" (UID: "4478262b-7ded-47d1-91ac-4957886c7628"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.829451 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.829487 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4478262b-7ded-47d1-91ac-4957886c7628-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.833111 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw" (OuterVolumeSpecName: "kube-api-access-c4qvw") pod "4478262b-7ded-47d1-91ac-4957886c7628" (UID: "4478262b-7ded-47d1-91ac-4957886c7628"). InnerVolumeSpecName "kube-api-access-c4qvw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.833848 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd" (OuterVolumeSpecName: "kube-api-access-wnqtd") pod "3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" (UID: "3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41"). InnerVolumeSpecName "kube-api-access-wnqtd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.930999 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnqtd\" (UniqueName: \"kubernetes.io/projected/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41-kube-api-access-wnqtd\") on node \"crc\" DevicePath \"\""
Nov 28 17:41:40 crc kubenswrapper[4909]: I1128 17:41:40.931288 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4qvw\" (UniqueName: \"kubernetes.io/projected/4478262b-7ded-47d1-91ac-4957886c7628-kube-api-access-c4qvw\") on node \"crc\" DevicePath \"\""
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.328007 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a74-account-create-update-69kzf" event={"ID":"4478262b-7ded-47d1-91ac-4957886c7628","Type":"ContainerDied","Data":"06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302"}
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.328072 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06765c841b14e83c9ebedf412ff3cfcb415dcfd678935034e9a2446b6c0ab302"
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.328109 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a74-account-create-update-69kzf"
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.330324 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gvx6t" event={"ID":"3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41","Type":"ContainerDied","Data":"b9c9dff23c987672e870cce2cf7478ce6571c47dada6de43ac27c123706fc6eb"}
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.330353 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9c9dff23c987672e870cce2cf7478ce6571c47dada6de43ac27c123706fc6eb"
Nov 28 17:41:41 crc kubenswrapper[4909]: I1128 17:41:41.330416 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gvx6t"
Need to start a new one" pod="openstack/barbican-db-create-gvx6t" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.786797 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-6pvd8"] Nov 28 17:41:42 crc kubenswrapper[4909]: E1128 17:41:42.788060 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4478262b-7ded-47d1-91ac-4957886c7628" containerName="mariadb-account-create-update" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.788095 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="4478262b-7ded-47d1-91ac-4957886c7628" containerName="mariadb-account-create-update" Nov 28 17:41:42 crc kubenswrapper[4909]: E1128 17:41:42.788137 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" containerName="mariadb-database-create" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.788158 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" containerName="mariadb-database-create" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.788569 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" containerName="mariadb-database-create" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.788623 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="4478262b-7ded-47d1-91ac-4957886c7628" containerName="mariadb-account-create-update" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.790060 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.793466 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.793942 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-njfd5" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.805822 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6pvd8"] Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.872116 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.872262 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.872298 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdpjb\" (UniqueName: \"kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.980500 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" 
(UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.980591 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.981687 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdpjb\" (UniqueName: \"kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:42 crc kubenswrapper[4909]: I1128 17:41:42.990462 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:43 crc kubenswrapper[4909]: I1128 17:41:43.002139 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:43 crc kubenswrapper[4909]: I1128 17:41:43.003601 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdpjb\" (UniqueName: \"kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb\") pod \"barbican-db-sync-6pvd8\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:43 crc kubenswrapper[4909]: I1128 17:41:43.127176 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:43 crc kubenswrapper[4909]: I1128 17:41:43.399209 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6pvd8"] Nov 28 17:41:43 crc kubenswrapper[4909]: W1128 17:41:43.405855 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65413c3a_c6e0_4f61_89bd_52005f25872e.slice/crio-a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c WatchSource:0}: Error finding container a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c: Status 404 returned error can't find the container with id a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c Nov 28 17:41:44 crc kubenswrapper[4909]: I1128 17:41:44.356004 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6pvd8" event={"ID":"65413c3a-c6e0-4f61-89bd-52005f25872e","Type":"ContainerStarted","Data":"3170aef21dd9696ed3a74b81f6690d79a1021b41218a4501a000fa87b8063bc3"} Nov 28 17:41:44 crc kubenswrapper[4909]: I1128 17:41:44.356372 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6pvd8" event={"ID":"65413c3a-c6e0-4f61-89bd-52005f25872e","Type":"ContainerStarted","Data":"a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c"} Nov 28 17:41:44 crc kubenswrapper[4909]: I1128 17:41:44.375565 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-6pvd8" podStartSLOduration=2.375546861 podStartE2EDuration="2.375546861s" podCreationTimestamp="2025-11-28 17:41:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:41:44.370757893 +0000 UTC m=+5486.767442417" watchObservedRunningTime="2025-11-28 17:41:44.375546861 +0000 UTC m=+5486.772231385" Nov 28 17:41:45 crc kubenswrapper[4909]: I1128 17:41:45.363353 4909 generic.go:334] "Generic (PLEG): container finished" podID="65413c3a-c6e0-4f61-89bd-52005f25872e" containerID="3170aef21dd9696ed3a74b81f6690d79a1021b41218a4501a000fa87b8063bc3" exitCode=0 Nov 28 17:41:45 crc kubenswrapper[4909]: I1128 17:41:45.363394 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6pvd8" event={"ID":"65413c3a-c6e0-4f61-89bd-52005f25872e","Type":"ContainerDied","Data":"3170aef21dd9696ed3a74b81f6690d79a1021b41218a4501a000fa87b8063bc3"} Nov 28 17:41:45 crc kubenswrapper[4909]: I1128 17:41:45.903172 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:41:45 crc kubenswrapper[4909]: E1128 17:41:45.903743 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.673837 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.745518 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data\") pod \"65413c3a-c6e0-4f61-89bd-52005f25872e\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.745664 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle\") pod \"65413c3a-c6e0-4f61-89bd-52005f25872e\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.745739 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdpjb\" (UniqueName: \"kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb\") pod \"65413c3a-c6e0-4f61-89bd-52005f25872e\" (UID: \"65413c3a-c6e0-4f61-89bd-52005f25872e\") " Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.751839 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "65413c3a-c6e0-4f61-89bd-52005f25872e" (UID: "65413c3a-c6e0-4f61-89bd-52005f25872e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.751884 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb" (OuterVolumeSpecName: "kube-api-access-xdpjb") pod "65413c3a-c6e0-4f61-89bd-52005f25872e" (UID: "65413c3a-c6e0-4f61-89bd-52005f25872e"). InnerVolumeSpecName "kube-api-access-xdpjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.766501 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65413c3a-c6e0-4f61-89bd-52005f25872e" (UID: "65413c3a-c6e0-4f61-89bd-52005f25872e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.848626 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.848725 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65413c3a-c6e0-4f61-89bd-52005f25872e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:46 crc kubenswrapper[4909]: I1128 17:41:46.848752 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdpjb\" (UniqueName: \"kubernetes.io/projected/65413c3a-c6e0-4f61-89bd-52005f25872e-kube-api-access-xdpjb\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.387291 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6pvd8" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.387303 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6pvd8" event={"ID":"65413c3a-c6e0-4f61-89bd-52005f25872e","Type":"ContainerDied","Data":"a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c"} Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.387526 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1a4c1671fb14929a05369a84eb0f387b370a83923de7f50e9d054e74c50a43c" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.591185 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-74bff95d75-zkvnr"] Nov 28 17:41:47 crc kubenswrapper[4909]: E1128 17:41:47.591534 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65413c3a-c6e0-4f61-89bd-52005f25872e" containerName="barbican-db-sync" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.591550 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="65413c3a-c6e0-4f61-89bd-52005f25872e" containerName="barbican-db-sync" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.591720 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="65413c3a-c6e0-4f61-89bd-52005f25872e" containerName="barbican-db-sync" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.593584 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.597336 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.597714 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.598986 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-njfd5" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.603587 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-588cbd7d6-6mxll"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.605152 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.609132 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.617240 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-74bff95d75-zkvnr"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.625164 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-588cbd7d6-6mxll"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665388 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa676b78-af57-406c-95fd-d7f329dd46d7-logs\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665425 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data-custom\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665455 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data-custom\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665514 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t6v8\" (UniqueName: \"kubernetes.io/projected/93545244-c0f9-4766-9239-567b5722f3b6-kube-api-access-8t6v8\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665539 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665559 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665576 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-combined-ca-bundle\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " 
pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2fq7\" (UniqueName: \"kubernetes.io/projected/aa676b78-af57-406c-95fd-d7f329dd46d7-kube-api-access-l2fq7\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665631 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93545244-c0f9-4766-9239-567b5722f3b6-logs\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.665648 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-combined-ca-bundle\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.690601 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.692421 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.703550 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768708 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhs6k\" (UniqueName: \"kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768804 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t6v8\" (UniqueName: \"kubernetes.io/projected/93545244-c0f9-4766-9239-567b5722f3b6-kube-api-access-8t6v8\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768841 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768902 4909 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-combined-ca-bundle\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768963 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.768998 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769084 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2fq7\" (UniqueName: \"kubernetes.io/projected/aa676b78-af57-406c-95fd-d7f329dd46d7-kube-api-access-l2fq7\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769115 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93545244-c0f9-4766-9239-567b5722f3b6-logs\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769138 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-combined-ca-bundle\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769196 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa676b78-af57-406c-95fd-d7f329dd46d7-logs\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769222 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data-custom\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " 
pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769250 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data-custom\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.769283 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.770875 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93545244-c0f9-4766-9239-567b5722f3b6-logs\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.772681 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa676b78-af57-406c-95fd-d7f329dd46d7-logs\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.782285 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.788698 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data-custom\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.792513 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-76ddd994f8-kn6g9"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.793094 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-combined-ca-bundle\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.793572 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93545244-c0f9-4766-9239-567b5722f3b6-config-data-custom\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.793825 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.801559 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-combined-ca-bundle\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.802872 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa676b78-af57-406c-95fd-d7f329dd46d7-config-data\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.806557 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.810203 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-76ddd994f8-kn6g9"] Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.816007 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t6v8\" (UniqueName: \"kubernetes.io/projected/93545244-c0f9-4766-9239-567b5722f3b6-kube-api-access-8t6v8\") pod \"barbican-keystone-listener-588cbd7d6-6mxll\" (UID: \"93545244-c0f9-4766-9239-567b5722f3b6\") " pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.820919 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2fq7\" (UniqueName: \"kubernetes.io/projected/aa676b78-af57-406c-95fd-d7f329dd46d7-kube-api-access-l2fq7\") pod \"barbican-worker-74bff95d75-zkvnr\" (UID: \"aa676b78-af57-406c-95fd-d7f329dd46d7\") " pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.883828 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data-custom\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.883928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884031 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884098 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " 
pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884153 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkjgk\" (UniqueName: \"kubernetes.io/projected/799b6ce4-6703-4ef0-a2a9-dc181821c868-kube-api-access-fkjgk\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884224 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/799b6ce4-6703-4ef0-a2a9-dc181821c868-logs\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884317 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884348 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884386 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-combined-ca-bundle\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.884423 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhs6k\" (UniqueName: \"kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.885489 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.885492 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.885814 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " 
pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.886050 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.905519 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhs6k\" (UniqueName: \"kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k\") pod \"dnsmasq-dns-56c9fd548f-cvf9g\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.936593 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-74bff95d75-zkvnr" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.958961 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.986258 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.986349 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-combined-ca-bundle\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.986406 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data-custom\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.986539 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkjgk\" (UniqueName: \"kubernetes.io/projected/799b6ce4-6703-4ef0-a2a9-dc181821c868-kube-api-access-fkjgk\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.986620 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/799b6ce4-6703-4ef0-a2a9-dc181821c868-logs\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.987109 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/799b6ce4-6703-4ef0-a2a9-dc181821c868-logs\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc 
kubenswrapper[4909]: I1128 17:41:47.991987 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.994940 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-config-data-custom\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:47 crc kubenswrapper[4909]: I1128 17:41:47.995264 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799b6ce4-6703-4ef0-a2a9-dc181821c868-combined-ca-bundle\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.008230 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkjgk\" (UniqueName: \"kubernetes.io/projected/799b6ce4-6703-4ef0-a2a9-dc181821c868-kube-api-access-fkjgk\") pod \"barbican-api-76ddd994f8-kn6g9\" (UID: \"799b6ce4-6703-4ef0-a2a9-dc181821c868\") " pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.021438 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.201837 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.214336 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-74bff95d75-zkvnr"] Nov 28 17:41:48 crc kubenswrapper[4909]: W1128 17:41:48.230733 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa676b78_af57_406c_95fd_d7f329dd46d7.slice/crio-8a155c461ec8dbfb5c8399e0bb2bc9011a55c5b64a3d5bbbd3862bdb16da8709 WatchSource:0}: Error finding container 8a155c461ec8dbfb5c8399e0bb2bc9011a55c5b64a3d5bbbd3862bdb16da8709: Status 404 returned error can't find the container with id 8a155c461ec8dbfb5c8399e0bb2bc9011a55c5b64a3d5bbbd3862bdb16da8709 Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.282033 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-588cbd7d6-6mxll"] Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.399880 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" event={"ID":"93545244-c0f9-4766-9239-567b5722f3b6","Type":"ContainerStarted","Data":"36ff8161d3a5e04669c84ce0fe77c7da4a66683a0b66b071243acb8250eb4588"} Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.401836 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bff95d75-zkvnr" event={"ID":"aa676b78-af57-406c-95fd-d7f329dd46d7","Type":"ContainerStarted","Data":"8a155c461ec8dbfb5c8399e0bb2bc9011a55c5b64a3d5bbbd3862bdb16da8709"} Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.585801 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:41:48 crc kubenswrapper[4909]: W1128 17:41:48.587523 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ebd76e7_4dac_469d_b366_3e210472ec63.slice/crio-990682e141e898be50b27c714a80df26580e74869f2c77d6a878de223b09517c WatchSource:0}: Error finding container 990682e141e898be50b27c714a80df26580e74869f2c77d6a878de223b09517c: Status 404 returned error can't find the container with id 990682e141e898be50b27c714a80df26580e74869f2c77d6a878de223b09517c Nov 28 17:41:48 crc kubenswrapper[4909]: I1128 17:41:48.673822 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-76ddd994f8-kn6g9"] Nov 28 17:41:48 crc kubenswrapper[4909]: W1128 17:41:48.675159 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod799b6ce4_6703_4ef0_a2a9_dc181821c868.slice/crio-23c78bd7eaeb17139aa989bf57d1f856b9c653c8018a5c74732af5a0c4f224db WatchSource:0}: Error finding container 23c78bd7eaeb17139aa989bf57d1f856b9c653c8018a5c74732af5a0c4f224db: Status 404 returned error can't find the container with id 23c78bd7eaeb17139aa989bf57d1f856b9c653c8018a5c74732af5a0c4f224db Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.410520 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bff95d75-zkvnr" event={"ID":"aa676b78-af57-406c-95fd-d7f329dd46d7","Type":"ContainerStarted","Data":"ee2770aef54df63caed5dd8f152d0f13c70bd0d968ac6019dd1b6fa142e84725"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.410839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bff95d75-zkvnr" 
event={"ID":"aa676b78-af57-406c-95fd-d7f329dd46d7","Type":"ContainerStarted","Data":"3eca931865859da18ce74780b9405f38c64c8a7ce150a9e0b3a0a0890632adab"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.414278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76ddd994f8-kn6g9" event={"ID":"799b6ce4-6703-4ef0-a2a9-dc181821c868","Type":"ContainerStarted","Data":"936964f525dbbab19446d96baa95948d0125a4631052bd8979e1831cd92454fe"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.414331 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76ddd994f8-kn6g9" event={"ID":"799b6ce4-6703-4ef0-a2a9-dc181821c868","Type":"ContainerStarted","Data":"4e0ee01845b8b24dd42511d9e112e3ef48daf16622559d65ab45d2d9f06b32f8"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.414598 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76ddd994f8-kn6g9" event={"ID":"799b6ce4-6703-4ef0-a2a9-dc181821c868","Type":"ContainerStarted","Data":"23c78bd7eaeb17139aa989bf57d1f856b9c653c8018a5c74732af5a0c4f224db"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.414633 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.414646 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.415864 4909 generic.go:334] "Generic (PLEG): container finished" podID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerID="e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2" exitCode=0 Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.416362 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" event={"ID":"5ebd76e7-4dac-469d-b366-3e210472ec63","Type":"ContainerDied","Data":"e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.416395 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" event={"ID":"5ebd76e7-4dac-469d-b366-3e210472ec63","Type":"ContainerStarted","Data":"990682e141e898be50b27c714a80df26580e74869f2c77d6a878de223b09517c"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.420800 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" event={"ID":"93545244-c0f9-4766-9239-567b5722f3b6","Type":"ContainerStarted","Data":"c915f9055d21a3ae6fed9ffa651d551d7d4cfeaf3a1b0c8a1fffa006cbdc3679"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.420853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" event={"ID":"93545244-c0f9-4766-9239-567b5722f3b6","Type":"ContainerStarted","Data":"c7390b2a554daea9f55ca7b5b9bdbfa4a6dd777a3ec1ffc2a305d71c0240ca12"} Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.427803 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-74bff95d75-zkvnr" podStartSLOduration=2.427776606 podStartE2EDuration="2.427776606s" podCreationTimestamp="2025-11-28 17:41:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:41:49.424726354 +0000 UTC m=+5491.821410888" watchObservedRunningTime="2025-11-28 17:41:49.427776606 +0000 UTC 
m=+5491.824461130" Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.528320 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-588cbd7d6-6mxll" podStartSLOduration=2.528293156 podStartE2EDuration="2.528293156s" podCreationTimestamp="2025-11-28 17:41:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:41:49.503975803 +0000 UTC m=+5491.900660337" watchObservedRunningTime="2025-11-28 17:41:49.528293156 +0000 UTC m=+5491.924977690" Nov 28 17:41:49 crc kubenswrapper[4909]: I1128 17:41:49.622505 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-76ddd994f8-kn6g9" podStartSLOduration=2.622485456 podStartE2EDuration="2.622485456s" podCreationTimestamp="2025-11-28 17:41:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:41:49.621190251 +0000 UTC m=+5492.017874795" watchObservedRunningTime="2025-11-28 17:41:49.622485456 +0000 UTC m=+5492.019169980" Nov 28 17:41:50 crc kubenswrapper[4909]: I1128 17:41:50.433807 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" event={"ID":"5ebd76e7-4dac-469d-b366-3e210472ec63","Type":"ContainerStarted","Data":"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6"} Nov 28 17:41:50 crc kubenswrapper[4909]: I1128 17:41:50.435776 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:50 crc kubenswrapper[4909]: I1128 17:41:50.466380 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" podStartSLOduration=3.466347221 podStartE2EDuration="3.466347221s" podCreationTimestamp="2025-11-28 17:41:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:41:50.462964 +0000 UTC m=+5492.859648544" watchObservedRunningTime="2025-11-28 17:41:50.466347221 +0000 UTC m=+5492.863031855" Nov 28 17:41:54 crc kubenswrapper[4909]: I1128 17:41:54.646282 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:56 crc kubenswrapper[4909]: I1128 17:41:56.042943 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-76ddd994f8-kn6g9" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.023826 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.116766 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.117337 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="dnsmasq-dns" containerID="cri-o://d7a80975f39a6e21817f38c3470a438daff6bcaefcd3382b6805a25782f30b03" gracePeriod=10 Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.509762 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerID="d7a80975f39a6e21817f38c3470a438daff6bcaefcd3382b6805a25782f30b03" 
exitCode=0 Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.509824 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" event={"ID":"6d29a694-47e1-4b41-8325-ca24e9e34c31","Type":"ContainerDied","Data":"d7a80975f39a6e21817f38c3470a438daff6bcaefcd3382b6805a25782f30b03"} Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.587821 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.611582 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vs26\" (UniqueName: \"kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26\") pod \"6d29a694-47e1-4b41-8325-ca24e9e34c31\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.611648 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb\") pod \"6d29a694-47e1-4b41-8325-ca24e9e34c31\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.611696 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config\") pod \"6d29a694-47e1-4b41-8325-ca24e9e34c31\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.611845 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb\") pod \"6d29a694-47e1-4b41-8325-ca24e9e34c31\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.611894 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc\") pod \"6d29a694-47e1-4b41-8325-ca24e9e34c31\" (UID: \"6d29a694-47e1-4b41-8325-ca24e9e34c31\") " Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.647949 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26" (OuterVolumeSpecName: "kube-api-access-4vs26") pod "6d29a694-47e1-4b41-8325-ca24e9e34c31" (UID: "6d29a694-47e1-4b41-8325-ca24e9e34c31"). InnerVolumeSpecName "kube-api-access-4vs26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.682394 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config" (OuterVolumeSpecName: "config") pod "6d29a694-47e1-4b41-8325-ca24e9e34c31" (UID: "6d29a694-47e1-4b41-8325-ca24e9e34c31"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.683849 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d29a694-47e1-4b41-8325-ca24e9e34c31" (UID: "6d29a694-47e1-4b41-8325-ca24e9e34c31"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.689926 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6d29a694-47e1-4b41-8325-ca24e9e34c31" (UID: "6d29a694-47e1-4b41-8325-ca24e9e34c31"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.709107 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6d29a694-47e1-4b41-8325-ca24e9e34c31" (UID: "6d29a694-47e1-4b41-8325-ca24e9e34c31"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.713521 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.713560 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.713572 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.713584 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vs26\" (UniqueName: \"kubernetes.io/projected/6d29a694-47e1-4b41-8325-ca24e9e34c31-kube-api-access-4vs26\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.713596 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d29a694-47e1-4b41-8325-ca24e9e34c31-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:41:58 crc kubenswrapper[4909]: I1128 17:41:58.902035 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:41:58 crc kubenswrapper[4909]: E1128 17:41:58.902312 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.521902 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" event={"ID":"6d29a694-47e1-4b41-8325-ca24e9e34c31","Type":"ContainerDied","Data":"b648e286da952c2e2d5921e9714de4b878189b4d07b1baf1f37735596f0dc705"} Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.522236 4909 scope.go:117] "RemoveContainer" containerID="d7a80975f39a6e21817f38c3470a438daff6bcaefcd3382b6805a25782f30b03" Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.522407 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6" Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.565859 4909 scope.go:117] "RemoveContainer" containerID="32425aa1cada73baa4a8ea8b30692195a456cd6b7b974f2077d4a5c2b9d2a957" Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.577032 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.585976 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bfdbcb9b7-kpnz6"] Nov 28 17:41:59 crc kubenswrapper[4909]: I1128 17:41:59.912630 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" path="/var/lib/kubelet/pods/6d29a694-47e1-4b41-8325-ca24e9e34c31/volumes" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.335848 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-mqt5d"] Nov 28 17:42:08 crc kubenswrapper[4909]: E1128 17:42:08.336852 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="dnsmasq-dns" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.336871 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="dnsmasq-dns" Nov 28 17:42:08 crc kubenswrapper[4909]: E1128 17:42:08.336893 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="init" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.336901 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="init" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.337116 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d29a694-47e1-4b41-8325-ca24e9e34c31" containerName="dnsmasq-dns" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.337769 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.343378 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mqt5d"] Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.441517 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0ec6-account-create-update-fz282"] Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.442103 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.442136 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkrrd\" (UniqueName: \"kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.443210 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.445530 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.451551 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0ec6-account-create-update-fz282"] Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.543913 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6pdn\" (UniqueName: \"kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn\") pod \"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.544030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.544057 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkrrd\" (UniqueName: \"kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.544098 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts\") pod \"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.544969 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.569694 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkrrd\" (UniqueName: \"kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd\") pod \"neutron-db-create-mqt5d\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.645027 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts\") pod \"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.645150 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6pdn\" (UniqueName: \"kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn\") pod 
\"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.645918 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts\") pod \"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.660456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6pdn\" (UniqueName: \"kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn\") pod \"neutron-0ec6-account-create-update-fz282\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.670494 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:08 crc kubenswrapper[4909]: I1128 17:42:08.788925 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.107096 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mqt5d"] Nov 28 17:42:09 crc kubenswrapper[4909]: W1128 17:42:09.217548 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1805556c_845c_4ac7_aa36_4a32906b45fe.slice/crio-1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f WatchSource:0}: Error finding container 1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f: Status 404 returned error can't find the container with id 1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.225314 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0ec6-account-create-update-fz282"] Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.628109 4909 generic.go:334] "Generic (PLEG): container finished" podID="1805556c-845c-4ac7-aa36-4a32906b45fe" containerID="49da55a0a93ceaa414483e1d4f76d6003a274d793c56881e4eb87322806c570b" exitCode=0 Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.628154 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0ec6-account-create-update-fz282" event={"ID":"1805556c-845c-4ac7-aa36-4a32906b45fe","Type":"ContainerDied","Data":"49da55a0a93ceaa414483e1d4f76d6003a274d793c56881e4eb87322806c570b"} Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.628196 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0ec6-account-create-update-fz282" event={"ID":"1805556c-845c-4ac7-aa36-4a32906b45fe","Type":"ContainerStarted","Data":"1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f"} Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.630770 4909 generic.go:334] "Generic (PLEG): container finished" podID="e080779e-a48f-4e59-9c88-8d458d3791df" containerID="fcc222908bacb3596ecca1a232cbd552340adfaabb38e259c211ecb0919d07b2" exitCode=0 Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.630802 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mqt5d" 
event={"ID":"e080779e-a48f-4e59-9c88-8d458d3791df","Type":"ContainerDied","Data":"fcc222908bacb3596ecca1a232cbd552340adfaabb38e259c211ecb0919d07b2"} Nov 28 17:42:09 crc kubenswrapper[4909]: I1128 17:42:09.630823 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mqt5d" event={"ID":"e080779e-a48f-4e59-9c88-8d458d3791df","Type":"ContainerStarted","Data":"9975f03c3603e096f3e6d043888c5246742a7aea86a06d2d441bf1b8933760c1"} Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.001053 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.006596 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.081201 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkrrd\" (UniqueName: \"kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd\") pod \"e080779e-a48f-4e59-9c88-8d458d3791df\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.081292 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6pdn\" (UniqueName: \"kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn\") pod \"1805556c-845c-4ac7-aa36-4a32906b45fe\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.081344 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts\") pod \"1805556c-845c-4ac7-aa36-4a32906b45fe\" (UID: \"1805556c-845c-4ac7-aa36-4a32906b45fe\") " Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.081459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts\") pod \"e080779e-a48f-4e59-9c88-8d458d3791df\" (UID: \"e080779e-a48f-4e59-9c88-8d458d3791df\") " Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.082120 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e080779e-a48f-4e59-9c88-8d458d3791df" (UID: "e080779e-a48f-4e59-9c88-8d458d3791df"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.082266 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1805556c-845c-4ac7-aa36-4a32906b45fe" (UID: "1805556c-845c-4ac7-aa36-4a32906b45fe"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.086195 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd" (OuterVolumeSpecName: "kube-api-access-rkrrd") pod "e080779e-a48f-4e59-9c88-8d458d3791df" (UID: "e080779e-a48f-4e59-9c88-8d458d3791df"). InnerVolumeSpecName "kube-api-access-rkrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.087461 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn" (OuterVolumeSpecName: "kube-api-access-m6pdn") pod "1805556c-845c-4ac7-aa36-4a32906b45fe" (UID: "1805556c-845c-4ac7-aa36-4a32906b45fe"). InnerVolumeSpecName "kube-api-access-m6pdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.183790 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e080779e-a48f-4e59-9c88-8d458d3791df-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.183925 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkrrd\" (UniqueName: \"kubernetes.io/projected/e080779e-a48f-4e59-9c88-8d458d3791df-kube-api-access-rkrrd\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.183954 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6pdn\" (UniqueName: \"kubernetes.io/projected/1805556c-845c-4ac7-aa36-4a32906b45fe-kube-api-access-m6pdn\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.183981 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1805556c-845c-4ac7-aa36-4a32906b45fe-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.655428 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0ec6-account-create-update-fz282" event={"ID":"1805556c-845c-4ac7-aa36-4a32906b45fe","Type":"ContainerDied","Data":"1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f"} Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.655880 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e80c649b9dcb20261ca6e6658d82614052dc843c782bcfb1be0cef1503e739f" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.655478 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0ec6-account-create-update-fz282" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.658308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mqt5d" event={"ID":"e080779e-a48f-4e59-9c88-8d458d3791df","Type":"ContainerDied","Data":"9975f03c3603e096f3e6d043888c5246742a7aea86a06d2d441bf1b8933760c1"} Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.658366 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9975f03c3603e096f3e6d043888c5246742a7aea86a06d2d441bf1b8933760c1" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.658413 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mqt5d" Nov 28 17:42:11 crc kubenswrapper[4909]: I1128 17:42:11.916160 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:42:11 crc kubenswrapper[4909]: E1128 17:42:11.917107 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.641721 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-8jlbg"] Nov 28 17:42:13 crc kubenswrapper[4909]: E1128 17:42:13.642562 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e080779e-a48f-4e59-9c88-8d458d3791df" containerName="mariadb-database-create" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.642586 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e080779e-a48f-4e59-9c88-8d458d3791df" containerName="mariadb-database-create" Nov 28 17:42:13 crc kubenswrapper[4909]: E1128 17:42:13.642629 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1805556c-845c-4ac7-aa36-4a32906b45fe" containerName="mariadb-account-create-update" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.642637 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1805556c-845c-4ac7-aa36-4a32906b45fe" containerName="mariadb-account-create-update" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.642902 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1805556c-845c-4ac7-aa36-4a32906b45fe" containerName="mariadb-account-create-update" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.642924 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e080779e-a48f-4e59-9c88-8d458d3791df" containerName="mariadb-database-create" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.643796 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.648296 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.648612 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-nhnlb" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.648894 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.658813 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8jlbg"] Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.728632 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfwqv\" (UniqueName: \"kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.728962 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.729029 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.830718 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.830785 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.830842 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfwqv\" (UniqueName: \"kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.836214 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.837515 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.847145 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfwqv\" (UniqueName: \"kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv\") pod \"neutron-db-sync-8jlbg\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:13 crc kubenswrapper[4909]: I1128 17:42:13.973159 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:14 crc kubenswrapper[4909]: W1128 17:42:14.481455 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00ebab16_ee08_4375_877d_12fff71e44be.slice/crio-128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85 WatchSource:0}: Error finding container 128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85: Status 404 returned error can't find the container with id 128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85 Nov 28 17:42:14 crc kubenswrapper[4909]: I1128 17:42:14.482476 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8jlbg"] Nov 28 17:42:14 crc kubenswrapper[4909]: I1128 17:42:14.702191 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8jlbg" event={"ID":"00ebab16-ee08-4375-877d-12fff71e44be","Type":"ContainerStarted","Data":"128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85"} Nov 28 17:42:15 crc kubenswrapper[4909]: I1128 17:42:15.722088 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8jlbg" event={"ID":"00ebab16-ee08-4375-877d-12fff71e44be","Type":"ContainerStarted","Data":"92f1bb93adb3c8d54919e6b67a98fb85f8a3e20f9b9cd19cafff4ffe87a45585"} Nov 28 17:42:15 crc kubenswrapper[4909]: I1128 17:42:15.756525 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-8jlbg" podStartSLOduration=2.756454412 podStartE2EDuration="2.756454412s" podCreationTimestamp="2025-11-28 17:42:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:42:15.744727927 +0000 UTC m=+5518.141412491" watchObservedRunningTime="2025-11-28 17:42:15.756454412 +0000 UTC m=+5518.153138946" Nov 28 17:42:19 crc kubenswrapper[4909]: I1128 17:42:19.761215 4909 generic.go:334] "Generic (PLEG): container finished" podID="00ebab16-ee08-4375-877d-12fff71e44be" containerID="92f1bb93adb3c8d54919e6b67a98fb85f8a3e20f9b9cd19cafff4ffe87a45585" exitCode=0 Nov 28 17:42:19 crc kubenswrapper[4909]: I1128 17:42:19.761796 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8jlbg" event={"ID":"00ebab16-ee08-4375-877d-12fff71e44be","Type":"ContainerDied","Data":"92f1bb93adb3c8d54919e6b67a98fb85f8a3e20f9b9cd19cafff4ffe87a45585"} Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.131001 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.268784 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfwqv\" (UniqueName: \"kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv\") pod \"00ebab16-ee08-4375-877d-12fff71e44be\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.268876 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config\") pod \"00ebab16-ee08-4375-877d-12fff71e44be\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.268952 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle\") pod \"00ebab16-ee08-4375-877d-12fff71e44be\" (UID: \"00ebab16-ee08-4375-877d-12fff71e44be\") " Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.276490 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv" (OuterVolumeSpecName: "kube-api-access-xfwqv") pod "00ebab16-ee08-4375-877d-12fff71e44be" (UID: "00ebab16-ee08-4375-877d-12fff71e44be"). InnerVolumeSpecName "kube-api-access-xfwqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.303366 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config" (OuterVolumeSpecName: "config") pod "00ebab16-ee08-4375-877d-12fff71e44be" (UID: "00ebab16-ee08-4375-877d-12fff71e44be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.317809 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00ebab16-ee08-4375-877d-12fff71e44be" (UID: "00ebab16-ee08-4375-877d-12fff71e44be"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.370760 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.370798 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00ebab16-ee08-4375-877d-12fff71e44be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.370813 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfwqv\" (UniqueName: \"kubernetes.io/projected/00ebab16-ee08-4375-877d-12fff71e44be-kube-api-access-xfwqv\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.784636 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8jlbg" event={"ID":"00ebab16-ee08-4375-877d-12fff71e44be","Type":"ContainerDied","Data":"128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85"} Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.784698 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="128900bc05843b24d946ad9ae2b33ebe0e8067b7db575590efeb7a05e1d3cd85" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.784758 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8jlbg" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.957214 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"] Nov 28 17:42:21 crc kubenswrapper[4909]: E1128 17:42:21.957568 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00ebab16-ee08-4375-877d-12fff71e44be" containerName="neutron-db-sync" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.957584 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00ebab16-ee08-4375-877d-12fff71e44be" containerName="neutron-db-sync" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.957754 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00ebab16-ee08-4375-877d-12fff71e44be" containerName="neutron-db-sync" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.958543 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:21 crc kubenswrapper[4909]: I1128 17:42:21.988013 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"] Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.048789 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7b4d47ccc7-5qprs"] Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.050177 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.055840 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.055977 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-nhnlb" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.056131 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.067139 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b4d47ccc7-5qprs"] Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.085601 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhb67\" (UniqueName: \"kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.085648 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.085699 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-sb\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.085719 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.085768 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.186953 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-combined-ca-bundle\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.186996 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzclk\" (UniqueName: \"kubernetes.io/projected/2c9e3241-c8bc-44a3-88cd-4e251259dba0-kube-api-access-nzclk\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " 
pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187033 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhb67\" (UniqueName: \"kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187060 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187081 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-sb\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187098 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-httpd-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187113 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187133 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.187159 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.188231 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.188752 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.188992 4909 
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.189357 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.211168 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhb67\" (UniqueName: \"kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67\") pod \"dnsmasq-dns-bdc76b79f-jsx6s\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.284513 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.288692 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-httpd-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.288735 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.288831 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-combined-ca-bundle\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.288854 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzclk\" (UniqueName: \"kubernetes.io/projected/2c9e3241-c8bc-44a3-88cd-4e251259dba0-kube-api-access-nzclk\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.292822 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-combined-ca-bundle\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.292858 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.293392 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c9e3241-c8bc-44a3-88cd-4e251259dba0-httpd-config\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.333358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzclk\" (UniqueName: \"kubernetes.io/projected/2c9e3241-c8bc-44a3-88cd-4e251259dba0-kube-api-access-nzclk\") pod \"neutron-7b4d47ccc7-5qprs\" (UID: \"2c9e3241-c8bc-44a3-88cd-4e251259dba0\") " pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.376134 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7b4d47ccc7-5qprs"
Nov 28 17:42:22 crc kubenswrapper[4909]: W1128 17:42:22.878076 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod046651a0_5bd3_47ec_91a9_f45f86868e24.slice/crio-8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3 WatchSource:0}: Error finding container 8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3: Status 404 returned error can't find the container with id 8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.884380 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"]
Nov 28 17:42:22 crc kubenswrapper[4909]: I1128 17:42:22.902922 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"
Nov 28 17:42:22 crc kubenswrapper[4909]: E1128 17:42:22.903234 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:42:23 crc kubenswrapper[4909]: W1128 17:42:23.052887 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c9e3241_c8bc_44a3_88cd_4e251259dba0.slice/crio-899247b7fe1870817b65b7bc5668e0bdc5dad7a0bbc552eed2848f705215eb4a WatchSource:0}: Error finding container 899247b7fe1870817b65b7bc5668e0bdc5dad7a0bbc552eed2848f705215eb4a: Status 404 returned error can't find the container with id 899247b7fe1870817b65b7bc5668e0bdc5dad7a0bbc552eed2848f705215eb4a
Nov 28 17:42:23 crc kubenswrapper[4909]: I1128 17:42:23.058519 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b4d47ccc7-5qprs"]
Nov 28 17:42:23 crc kubenswrapper[4909]: I1128 17:42:23.803966 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b4d47ccc7-5qprs" event={"ID":"2c9e3241-c8bc-44a3-88cd-4e251259dba0","Type":"ContainerStarted","Data":"899247b7fe1870817b65b7bc5668e0bdc5dad7a0bbc552eed2848f705215eb4a"}
Nov 28 17:42:23 crc kubenswrapper[4909]: I1128 17:42:23.806136 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerStarted","Data":"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c"}
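
The cAdvisor "Failed to process watch event" warnings above carry a cgroup path that encodes both the pod UID (with dashes flattened to underscores) and the CRI-O container ID. A short sketch of recovering the two fields from such a path with plain string handling; the path is copied from the warning above:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        p := "/kubepods.slice/kubepods-besteffort.slice/" +
            "kubepods-besteffort-pod046651a0_5bd3_47ec_91a9_f45f86868e24.slice/" +
            "crio-8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3"

        // Last path element is "crio-<container id>".
        base := p[strings.LastIndex(p, "/")+1:]
        id := strings.TrimPrefix(base, "crio-")

        // Pod UID is encoded in the parent .slice with '-' replaced by '_'.
        parent := strings.Split(p, "/")[3] // kubepods-besteffort-pod<uid>.slice
        uid := strings.TrimSuffix(strings.TrimPrefix(parent, "kubepods-besteffort-pod"), ".slice")
        uid = strings.ReplaceAll(uid, "_", "-")

        fmt.Println("container:", id)
        fmt.Println("pod UID:  ", uid)
    }
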
event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerStarted","Data":"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c"} Nov 28 17:42:23 crc kubenswrapper[4909]: I1128 17:42:23.807670 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerStarted","Data":"8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3"} Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.815182 4909 generic.go:334] "Generic (PLEG): container finished" podID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerID="eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c" exitCode=0 Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.815337 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerDied","Data":"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c"} Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.817114 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b4d47ccc7-5qprs" event={"ID":"2c9e3241-c8bc-44a3-88cd-4e251259dba0","Type":"ContainerStarted","Data":"9032e5c5c457898b2a8c03eb22329a4b3ae3c54bf500f739178e1e35d8c644db"} Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.817144 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b4d47ccc7-5qprs" event={"ID":"2c9e3241-c8bc-44a3-88cd-4e251259dba0","Type":"ContainerStarted","Data":"5b8df250d976cce324a592bdaf416dc541a43bd62685be332ac4be368f2935ed"} Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.818072 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:24 crc kubenswrapper[4909]: I1128 17:42:24.856646 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7b4d47ccc7-5qprs" podStartSLOduration=2.856620544 podStartE2EDuration="2.856620544s" podCreationTimestamp="2025-11-28 17:42:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:42:24.854302652 +0000 UTC m=+5527.250987176" watchObservedRunningTime="2025-11-28 17:42:24.856620544 +0000 UTC m=+5527.253305078" Nov 28 17:42:25 crc kubenswrapper[4909]: I1128 17:42:25.827060 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerStarted","Data":"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753"} Nov 28 17:42:25 crc kubenswrapper[4909]: I1128 17:42:25.856631 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" podStartSLOduration=4.856610269 podStartE2EDuration="4.856610269s" podCreationTimestamp="2025-11-28 17:42:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:42:25.845167341 +0000 UTC m=+5528.241851865" watchObservedRunningTime="2025-11-28 17:42:25.856610269 +0000 UTC m=+5528.253294813" Nov 28 17:42:26 crc kubenswrapper[4909]: I1128 17:42:26.834133 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.286894 4909 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.376010 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.376298 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="dnsmasq-dns" containerID="cri-o://053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6" gracePeriod=10 Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.848914 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.903819 4909 generic.go:334] "Generic (PLEG): container finished" podID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerID="053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6" exitCode=0 Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.903866 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" event={"ID":"5ebd76e7-4dac-469d-b366-3e210472ec63","Type":"ContainerDied","Data":"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6"} Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.903891 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9fd548f-cvf9g" event={"ID":"5ebd76e7-4dac-469d-b366-3e210472ec63","Type":"ContainerDied","Data":"990682e141e898be50b27c714a80df26580e74869f2c77d6a878de223b09517c"} Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.903908 4909 scope.go:117] "RemoveContainer" containerID="053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6" Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.904008 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.921113 4909 scope.go:117] "RemoveContainer" containerID="e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2"
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.940165 4909 scope.go:117] "RemoveContainer" containerID="053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6"
Nov 28 17:42:32 crc kubenswrapper[4909]: E1128 17:42:32.940615 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6\": container with ID starting with 053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6 not found: ID does not exist" containerID="053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6"
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.940642 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6"} err="failed to get container status \"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6\": rpc error: code = NotFound desc = could not find container \"053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6\": container with ID starting with 053c44abaa5e0958c8d88ed167ae49451733bc35bdfecb3c548619ab2164e4f6 not found: ID does not exist"
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.940689 4909 scope.go:117] "RemoveContainer" containerID="e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2"
Nov 28 17:42:32 crc kubenswrapper[4909]: E1128 17:42:32.940886 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2\": container with ID starting with e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2 not found: ID does not exist" containerID="e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2"
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.940905 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2"} err="failed to get container status \"e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2\": rpc error: code = NotFound desc = could not find container \"e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2\": container with ID starting with e6c2fef3b7a50dadbcf10e194c08126d1c55e1e376e7369d6c7fed4cbf0612c2 not found: ID does not exist"
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.955459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") "
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.955503 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhs6k\" (UniqueName: \"kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") "
Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.955540 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") "
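
The NotFound errors above are the tail end of an idempotent cleanup: the kubelet asks the runtime to delete a container that CRI-O has already removed, gets NotFound back, and records it without failing the sync. A sketch of that pattern; errNotFound here is a stand-in for the real CRI gRPC NotFound status:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("NotFound") // stand-in for the CRI gRPC code

    // removeContainer treats an already-missing container as successfully removed.
    func removeContainer(id string, rm func(string) error) error {
        if err := rm(id); err != nil {
            if errors.Is(err, errNotFound) {
                fmt.Println("container", id, "already gone; nothing to delete")
                return nil
            }
            return err
        }
        return nil
    }

    func main() {
        err := removeContainer("053c44ab", func(string) error { return errNotFound })
        fmt.Println("err:", err) // err: <nil>
    }
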
"operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.955627 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.955675 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config\") pod \"5ebd76e7-4dac-469d-b366-3e210472ec63\" (UID: \"5ebd76e7-4dac-469d-b366-3e210472ec63\") " Nov 28 17:42:32 crc kubenswrapper[4909]: I1128 17:42:32.961952 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k" (OuterVolumeSpecName: "kube-api-access-hhs6k") pod "5ebd76e7-4dac-469d-b366-3e210472ec63" (UID: "5ebd76e7-4dac-469d-b366-3e210472ec63"). InnerVolumeSpecName "kube-api-access-hhs6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.004003 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5ebd76e7-4dac-469d-b366-3e210472ec63" (UID: "5ebd76e7-4dac-469d-b366-3e210472ec63"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.004922 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5ebd76e7-4dac-469d-b366-3e210472ec63" (UID: "5ebd76e7-4dac-469d-b366-3e210472ec63"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.005723 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5ebd76e7-4dac-469d-b366-3e210472ec63" (UID: "5ebd76e7-4dac-469d-b366-3e210472ec63"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.006973 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config" (OuterVolumeSpecName: "config") pod "5ebd76e7-4dac-469d-b366-3e210472ec63" (UID: "5ebd76e7-4dac-469d-b366-3e210472ec63"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.063880 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.063920 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.063937 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.063953 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhs6k\" (UniqueName: \"kubernetes.io/projected/5ebd76e7-4dac-469d-b366-3e210472ec63-kube-api-access-hhs6k\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.063966 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ebd76e7-4dac-469d-b366-3e210472ec63-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.240664 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.249879 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56c9fd548f-cvf9g"] Nov 28 17:42:33 crc kubenswrapper[4909]: I1128 17:42:33.917410 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" path="/var/lib/kubelet/pods/5ebd76e7-4dac-469d-b366-3e210472ec63/volumes" Nov 28 17:42:37 crc kubenswrapper[4909]: I1128 17:42:37.911050 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:42:37 crc kubenswrapper[4909]: E1128 17:42:37.911719 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:42:48 crc kubenswrapper[4909]: I1128 17:42:48.902439 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:42:48 crc kubenswrapper[4909]: E1128 17:42:48.903712 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:42:52 crc kubenswrapper[4909]: I1128 17:42:52.485145 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7b4d47ccc7-5qprs" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.615136 4909 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2n9cc"] Nov 28 17:42:59 crc kubenswrapper[4909]: E1128 17:42:59.616092 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="init" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.616110 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="init" Nov 28 17:42:59 crc kubenswrapper[4909]: E1128 17:42:59.616152 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="dnsmasq-dns" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.616163 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="dnsmasq-dns" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.616383 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ebd76e7-4dac-469d-b366-3e210472ec63" containerName="dnsmasq-dns" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.617082 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.624719 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2n9cc"] Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.713543 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-e5ef-account-create-update-rxlg6"] Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.714831 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.717684 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.724541 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e5ef-account-create-update-rxlg6"] Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.765843 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.765941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c2jc\" (UniqueName: \"kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.867764 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjnzf\" (UniqueName: \"kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.867840 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.868153 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.868240 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c2jc\" (UniqueName: \"kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.868588 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.888188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c2jc\" (UniqueName: \"kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc\") pod \"glance-db-create-2n9cc\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.938599 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2n9cc" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.977579 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.977945 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjnzf\" (UniqueName: \"kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.978296 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:42:59 crc kubenswrapper[4909]: I1128 17:42:59.995596 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjnzf\" (UniqueName: \"kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf\") pod \"glance-e5ef-account-create-update-rxlg6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:43:00 crc kubenswrapper[4909]: I1128 17:43:00.044104 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:43:00 crc kubenswrapper[4909]: I1128 17:43:00.451061 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2n9cc"] Nov 28 17:43:00 crc kubenswrapper[4909]: I1128 17:43:00.569127 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e5ef-account-create-update-rxlg6"] Nov 28 17:43:00 crc kubenswrapper[4909]: W1128 17:43:00.572026 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode11133f8_5269_401c_aa3d_3658264356f6.slice/crio-fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3 WatchSource:0}: Error finding container fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3: Status 404 returned error can't find the container with id fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3 Nov 28 17:43:00 crc kubenswrapper[4909]: I1128 17:43:00.901083 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:43:00 crc kubenswrapper[4909]: E1128 17:43:00.901599 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.205951 4909 generic.go:334] "Generic (PLEG): container finished" podID="87d51f30-31ed-490c-b758-43cdaf1d73d1" containerID="e40dceaab2b0f4da20494408ea54519ae2c665306e73fedcfbc8428dd8372633" exitCode=0 Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.206049 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2n9cc" event={"ID":"87d51f30-31ed-490c-b758-43cdaf1d73d1","Type":"ContainerDied","Data":"e40dceaab2b0f4da20494408ea54519ae2c665306e73fedcfbc8428dd8372633"} Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.206085 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2n9cc" event={"ID":"87d51f30-31ed-490c-b758-43cdaf1d73d1","Type":"ContainerStarted","Data":"7da12974ceb9c841bc0d7805159fa8d85f0634bc9b8c119f2d3fef9aa3bf829e"} Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.208349 4909 generic.go:334] "Generic (PLEG): container finished" podID="e11133f8-5269-401c-aa3d-3658264356f6" containerID="0b3308e20a8fe2c349170f13d9c193e690b8b406744a2fbabef4ba64c37e82fa" exitCode=0 Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.208381 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e5ef-account-create-update-rxlg6" event={"ID":"e11133f8-5269-401c-aa3d-3658264356f6","Type":"ContainerDied","Data":"0b3308e20a8fe2c349170f13d9c193e690b8b406744a2fbabef4ba64c37e82fa"} Nov 28 17:43:01 crc kubenswrapper[4909]: I1128 17:43:01.208400 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e5ef-account-create-update-rxlg6" event={"ID":"e11133f8-5269-401c-aa3d-3658264356f6","Type":"ContainerStarted","Data":"fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3"} Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.692635 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2n9cc" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.699595 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.861505 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts\") pod \"87d51f30-31ed-490c-b758-43cdaf1d73d1\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.861577 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts\") pod \"e11133f8-5269-401c-aa3d-3658264356f6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.861641 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjnzf\" (UniqueName: \"kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf\") pod \"e11133f8-5269-401c-aa3d-3658264356f6\" (UID: \"e11133f8-5269-401c-aa3d-3658264356f6\") " Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.861708 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c2jc\" (UniqueName: \"kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc\") pod \"87d51f30-31ed-490c-b758-43cdaf1d73d1\" (UID: \"87d51f30-31ed-490c-b758-43cdaf1d73d1\") " Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.862607 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "87d51f30-31ed-490c-b758-43cdaf1d73d1" (UID: "87d51f30-31ed-490c-b758-43cdaf1d73d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.863382 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e11133f8-5269-401c-aa3d-3658264356f6" (UID: "e11133f8-5269-401c-aa3d-3658264356f6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.868833 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc" (OuterVolumeSpecName: "kube-api-access-7c2jc") pod "87d51f30-31ed-490c-b758-43cdaf1d73d1" (UID: "87d51f30-31ed-490c-b758-43cdaf1d73d1"). InnerVolumeSpecName "kube-api-access-7c2jc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.869834 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf" (OuterVolumeSpecName: "kube-api-access-gjnzf") pod "e11133f8-5269-401c-aa3d-3658264356f6" (UID: "e11133f8-5269-401c-aa3d-3658264356f6"). InnerVolumeSpecName "kube-api-access-gjnzf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.963901 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87d51f30-31ed-490c-b758-43cdaf1d73d1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.963936 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e11133f8-5269-401c-aa3d-3658264356f6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.963945 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjnzf\" (UniqueName: \"kubernetes.io/projected/e11133f8-5269-401c-aa3d-3658264356f6-kube-api-access-gjnzf\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:02 crc kubenswrapper[4909]: I1128 17:43:02.963957 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c2jc\" (UniqueName: \"kubernetes.io/projected/87d51f30-31ed-490c-b758-43cdaf1d73d1-kube-api-access-7c2jc\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.233862 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2n9cc" event={"ID":"87d51f30-31ed-490c-b758-43cdaf1d73d1","Type":"ContainerDied","Data":"7da12974ceb9c841bc0d7805159fa8d85f0634bc9b8c119f2d3fef9aa3bf829e"} Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.234382 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7da12974ceb9c841bc0d7805159fa8d85f0634bc9b8c119f2d3fef9aa3bf829e" Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.233898 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2n9cc" Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.239368 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e5ef-account-create-update-rxlg6" event={"ID":"e11133f8-5269-401c-aa3d-3658264356f6","Type":"ContainerDied","Data":"fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3"} Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.239418 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe39e619c284a5da4253f9e3fb399058ac184575547474a016997e76517f08d3" Nov 28 17:43:03 crc kubenswrapper[4909]: I1128 17:43:03.239481 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-e5ef-account-create-update-rxlg6" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.955400 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-4pqhr"] Nov 28 17:43:04 crc kubenswrapper[4909]: E1128 17:43:04.956125 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d51f30-31ed-490c-b758-43cdaf1d73d1" containerName="mariadb-database-create" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.956142 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d51f30-31ed-490c-b758-43cdaf1d73d1" containerName="mariadb-database-create" Nov 28 17:43:04 crc kubenswrapper[4909]: E1128 17:43:04.956155 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e11133f8-5269-401c-aa3d-3658264356f6" containerName="mariadb-account-create-update" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.956163 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e11133f8-5269-401c-aa3d-3658264356f6" containerName="mariadb-account-create-update" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.956370 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="87d51f30-31ed-490c-b758-43cdaf1d73d1" containerName="mariadb-database-create" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.956391 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e11133f8-5269-401c-aa3d-3658264356f6" containerName="mariadb-account-create-update" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.957068 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.962090 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.962150 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-lcbsj" Nov 28 17:43:04 crc kubenswrapper[4909]: I1128 17:43:04.970422 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-4pqhr"] Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.112485 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.112594 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk2bj\" (UniqueName: \"kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.112644 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.112815 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.214356 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.214423 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk2bj\" (UniqueName: \"kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.214470 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.215107 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.225384 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.228264 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.242748 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.247216 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk2bj\" (UniqueName: \"kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj\") pod \"glance-db-sync-4pqhr\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.285145 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:05 crc kubenswrapper[4909]: I1128 17:43:05.863498 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-4pqhr"] Nov 28 17:43:06 crc kubenswrapper[4909]: I1128 17:43:06.283729 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4pqhr" event={"ID":"614d33fc-17b9-4a47-b85a-20a0dec18413","Type":"ContainerStarted","Data":"22dfd3491eeb9b0c010d5238f985dd7e3a861f30d45b593996f8362cafdc4ae7"} Nov 28 17:43:07 crc kubenswrapper[4909]: I1128 17:43:07.294839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4pqhr" event={"ID":"614d33fc-17b9-4a47-b85a-20a0dec18413","Type":"ContainerStarted","Data":"43def0371b58b8f0cb0c33aac74483346a5f2f1c01c81d3509584522fcb06d36"} Nov 28 17:43:07 crc kubenswrapper[4909]: I1128 17:43:07.317206 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-4pqhr" podStartSLOduration=3.317178409 podStartE2EDuration="3.317178409s" podCreationTimestamp="2025-11-28 17:43:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:07.311140317 +0000 UTC m=+5569.707824841" watchObservedRunningTime="2025-11-28 17:43:07.317178409 +0000 UTC m=+5569.713862973" Nov 28 17:43:10 crc kubenswrapper[4909]: I1128 17:43:10.329974 4909 generic.go:334] "Generic (PLEG): container finished" podID="614d33fc-17b9-4a47-b85a-20a0dec18413" containerID="43def0371b58b8f0cb0c33aac74483346a5f2f1c01c81d3509584522fcb06d36" exitCode=0 Nov 28 17:43:10 crc kubenswrapper[4909]: I1128 17:43:10.330096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4pqhr" event={"ID":"614d33fc-17b9-4a47-b85a-20a0dec18413","Type":"ContainerDied","Data":"43def0371b58b8f0cb0c33aac74483346a5f2f1c01c81d3509584522fcb06d36"} Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.735264 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.845432 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data\") pod \"614d33fc-17b9-4a47-b85a-20a0dec18413\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.845539 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk2bj\" (UniqueName: \"kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj\") pod \"614d33fc-17b9-4a47-b85a-20a0dec18413\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.845608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data\") pod \"614d33fc-17b9-4a47-b85a-20a0dec18413\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.845675 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle\") pod \"614d33fc-17b9-4a47-b85a-20a0dec18413\" (UID: \"614d33fc-17b9-4a47-b85a-20a0dec18413\") " Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.854807 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "614d33fc-17b9-4a47-b85a-20a0dec18413" (UID: "614d33fc-17b9-4a47-b85a-20a0dec18413"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.854836 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj" (OuterVolumeSpecName: "kube-api-access-nk2bj") pod "614d33fc-17b9-4a47-b85a-20a0dec18413" (UID: "614d33fc-17b9-4a47-b85a-20a0dec18413"). InnerVolumeSpecName "kube-api-access-nk2bj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.878563 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "614d33fc-17b9-4a47-b85a-20a0dec18413" (UID: "614d33fc-17b9-4a47-b85a-20a0dec18413"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.892056 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data" (OuterVolumeSpecName: "config-data") pod "614d33fc-17b9-4a47-b85a-20a0dec18413" (UID: "614d33fc-17b9-4a47-b85a-20a0dec18413"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.947725 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.947754 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.947764 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk2bj\" (UniqueName: \"kubernetes.io/projected/614d33fc-17b9-4a47-b85a-20a0dec18413-kube-api-access-nk2bj\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:11 crc kubenswrapper[4909]: I1128 17:43:11.947774 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/614d33fc-17b9-4a47-b85a-20a0dec18413-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.350212 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4pqhr" event={"ID":"614d33fc-17b9-4a47-b85a-20a0dec18413","Type":"ContainerDied","Data":"22dfd3491eeb9b0c010d5238f985dd7e3a861f30d45b593996f8362cafdc4ae7"} Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.350269 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22dfd3491eeb9b0c010d5238f985dd7e3a861f30d45b593996f8362cafdc4ae7" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.350344 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-4pqhr" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.687835 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:12 crc kubenswrapper[4909]: E1128 17:43:12.688518 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614d33fc-17b9-4a47-b85a-20a0dec18413" containerName="glance-db-sync" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.688539 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="614d33fc-17b9-4a47-b85a-20a0dec18413" containerName="glance-db-sync" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.688752 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="614d33fc-17b9-4a47-b85a-20a0dec18413" containerName="glance-db-sync" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.695885 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.698853 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.699037 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.700358 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-lcbsj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.701915 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.720924 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.813992 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.815833 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.828973 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862487 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862590 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862708 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862818 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862906 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.862983 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-zfjcz\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.863032 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.896255 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.899967 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.905792 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.909948 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.964995 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965062 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7vfj\" (UniqueName: \"kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965100 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965128 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965163 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965376 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfjcz\" (UniqueName: 
\"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965445 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965507 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965547 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965777 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.965819 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.966101 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.966169 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.969535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.970224 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.971306 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.992010 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfjcz\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:12 crc kubenswrapper[4909]: I1128 17:43:12.992847 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.018215 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067574 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt74r\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067647 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067706 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7vfj\" (UniqueName: \"kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067742 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067779 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067861 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067897 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067926 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067955 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.067993 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.068037 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.068085 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.070043 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.070707 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.071369 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.071465 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.108455 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7vfj\" (UniqueName: \"kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj\") pod \"dnsmasq-dns-64d4964cf7-xrqbj\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.137020 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169501 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169556 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt74r\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169582 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169653 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169686 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169715 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.169745 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.171046 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.174337 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.177020 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.177554 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.181104 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.183495 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.207362 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt74r\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r\") pod \"glance-default-internal-api-0\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.224108 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.504849 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.695095 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:13 crc kubenswrapper[4909]: W1128 17:43:13.701897 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod435e59f3_911f_41f4_8f2d_cdc24a55c38e.slice/crio-6318fbaac04bde7c29d017f911ad2428c901619b56669540b432e4c25cdc3dff WatchSource:0}: Error finding container 6318fbaac04bde7c29d017f911ad2428c901619b56669540b432e4c25cdc3dff: Status 404 returned error can't find the container with id 6318fbaac04bde7c29d017f911ad2428c901619b56669540b432e4c25cdc3dff Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.722028 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:13 crc kubenswrapper[4909]: W1128 17:43:13.727821 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8aa469b4_b3b2_48aa_82b4_7d6bdfd5cabd.slice/crio-ed37a0d4a9070679f12928729168efb03856b41e233ac1e26697b0dab26d42d4 WatchSource:0}: Error finding container ed37a0d4a9070679f12928729168efb03856b41e233ac1e26697b0dab26d42d4: Status 404 returned error can't find the container with id ed37a0d4a9070679f12928729168efb03856b41e233ac1e26697b0dab26d42d4 Nov 28 17:43:13 crc kubenswrapper[4909]: I1128 17:43:13.962217 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.388598 4909 generic.go:334] "Generic (PLEG): container finished" podID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerID="d9ed563b6335561f0a80e5fbdbfe73da4c165e713bff023e29138a70c60ca7e6" exitCode=0 Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.388852 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" event={"ID":"435e59f3-911f-41f4-8f2d-cdc24a55c38e","Type":"ContainerDied","Data":"d9ed563b6335561f0a80e5fbdbfe73da4c165e713bff023e29138a70c60ca7e6"} Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.389015 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" event={"ID":"435e59f3-911f-41f4-8f2d-cdc24a55c38e","Type":"ContainerStarted","Data":"6318fbaac04bde7c29d017f911ad2428c901619b56669540b432e4c25cdc3dff"} Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.392092 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerStarted","Data":"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882"} Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.392472 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerStarted","Data":"ed37a0d4a9070679f12928729168efb03856b41e233ac1e26697b0dab26d42d4"} Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.415491 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerStarted","Data":"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741"} Nov 28 17:43:14 crc kubenswrapper[4909]: I1128 17:43:14.415570 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerStarted","Data":"47f480a340ee103556212f8179781c3acbb2a22263547db084f20668ff2928e8"} Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.428673 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerStarted","Data":"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e"} Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.428828 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-log" containerID="cri-o://228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" gracePeriod=30 Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.429074 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-httpd" containerID="cri-o://ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" gracePeriod=30 Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.439714 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" event={"ID":"435e59f3-911f-41f4-8f2d-cdc24a55c38e","Type":"ContainerStarted","Data":"d50e36c9ca6fbf1eee072a7a1b478677c3d8c0fee9b90731fa1be00667be6a4e"} Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.439849 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.444077 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerStarted","Data":"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a"} Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.467366 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.4673505430000002 podStartE2EDuration="3.467350543s" podCreationTimestamp="2025-11-28 17:43:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:15.465078912 +0000 UTC m=+5577.861763436" watchObservedRunningTime="2025-11-28 17:43:15.467350543 +0000 UTC m=+5577.864035067" Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.506676 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.506605319 podStartE2EDuration="3.506605319s" podCreationTimestamp="2025-11-28 17:43:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:15.48992089 +0000 UTC m=+5577.886605454" watchObservedRunningTime="2025-11-28 17:43:15.506605319 +0000 UTC m=+5577.903289873" Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.514388 4909 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" podStartSLOduration=3.514365757 podStartE2EDuration="3.514365757s" podCreationTimestamp="2025-11-28 17:43:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:15.513982627 +0000 UTC m=+5577.910667181" watchObservedRunningTime="2025-11-28 17:43:15.514365757 +0000 UTC m=+5577.911050301" Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.804679 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:15 crc kubenswrapper[4909]: I1128 17:43:15.901254 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:43:15 crc kubenswrapper[4909]: E1128 17:43:15.901562 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.081842 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236035 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfjcz\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236420 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236511 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236564 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236686 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236845 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: 
\"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.236879 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle\") pod \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\" (UID: \"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c\") " Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.237330 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs" (OuterVolumeSpecName: "logs") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.237743 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.237897 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.237924 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.242829 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz" (OuterVolumeSpecName: "kube-api-access-zfjcz") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "kube-api-access-zfjcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.245626 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph" (OuterVolumeSpecName: "ceph") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.245864 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts" (OuterVolumeSpecName: "scripts") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.278908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.323264 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data" (OuterVolumeSpecName: "config-data") pod "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" (UID: "ee167be5-2cd2-4407-b21b-f6e2bb87ed6c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.339462 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.339936 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.340041 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfjcz\" (UniqueName: \"kubernetes.io/projected/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-kube-api-access-zfjcz\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.340283 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.340386 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.454817 4909 generic.go:334] "Generic (PLEG): container finished" podID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerID="ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" exitCode=0 Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.454851 4909 generic.go:334] "Generic (PLEG): container finished" podID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerID="228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" exitCode=143 Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.454875 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.460309 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerDied","Data":"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e"} Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.460466 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerDied","Data":"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741"} Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.460733 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ee167be5-2cd2-4407-b21b-f6e2bb87ed6c","Type":"ContainerDied","Data":"47f480a340ee103556212f8179781c3acbb2a22263547db084f20668ff2928e8"} Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.460646 4909 scope.go:117] "RemoveContainer" containerID="ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.498424 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.501912 4909 scope.go:117] "RemoveContainer" containerID="228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.513793 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.526288 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:16 crc kubenswrapper[4909]: E1128 17:43:16.526622 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-log" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.526638 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-log" Nov 28 17:43:16 crc kubenswrapper[4909]: E1128 17:43:16.526695 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-httpd" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.526702 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-httpd" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.526858 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-httpd" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.526875 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" containerName="glance-log" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527005 4909 scope.go:117] "RemoveContainer" containerID="ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" Nov 28 17:43:16 crc kubenswrapper[4909]: E1128 17:43:16.527416 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e\": container with ID starting with 
ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e not found: ID does not exist" containerID="ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527451 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e"} err="failed to get container status \"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e\": rpc error: code = NotFound desc = could not find container \"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e\": container with ID starting with ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e not found: ID does not exist" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527491 4909 scope.go:117] "RemoveContainer" containerID="228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" Nov 28 17:43:16 crc kubenswrapper[4909]: E1128 17:43:16.527790 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741\": container with ID starting with 228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741 not found: ID does not exist" containerID="228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527816 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741"} err="failed to get container status \"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741\": rpc error: code = NotFound desc = could not find container \"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741\": container with ID starting with 228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741 not found: ID does not exist" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527834 4909 scope.go:117] "RemoveContainer" containerID="ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.527954 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.528673 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e"} err="failed to get container status \"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e\": rpc error: code = NotFound desc = could not find container \"ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e\": container with ID starting with ddbbaad9828352ad2a2d58bb32b0f7bcc1cc20130445df9a7b8ad5f5752bd33e not found: ID does not exist" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.528703 4909 scope.go:117] "RemoveContainer" containerID="228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.531481 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741"} err="failed to get container status \"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741\": rpc error: code = NotFound desc = could not find container \"228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741\": container with ID starting with 228fd91856148a45e3c7c7e92ff94a46d4e0aad7c85f4f92fd0c87da426bc741 not found: ID does not exist" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.535174 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.560095 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.648744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc7x6\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.648888 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.648927 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.648991 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.649037 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.649067 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.649091 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750586 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750713 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750742 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750761 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750801 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc7x6\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750870 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.750896 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.751223 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.751253 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.754630 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.755078 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.755551 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.755832 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.772483 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc7x6\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6\") pod \"glance-default-external-api-0\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " pod="openstack/glance-default-external-api-0" Nov 28 17:43:16 crc kubenswrapper[4909]: I1128 17:43:16.865529 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:43:17 crc kubenswrapper[4909]: W1128 17:43:17.388533 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e86471a_d918_4d86_8696_842dc2205cd3.slice/crio-6c3257a3dff06fa5f7d28458b1b354e38b9a2673f9944cf66769580abd7348ee WatchSource:0}: Error finding container 6c3257a3dff06fa5f7d28458b1b354e38b9a2673f9944cf66769580abd7348ee: Status 404 returned error can't find the container with id 6c3257a3dff06fa5f7d28458b1b354e38b9a2673f9944cf66769580abd7348ee Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.395584 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.467368 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerStarted","Data":"6c3257a3dff06fa5f7d28458b1b354e38b9a2673f9944cf66769580abd7348ee"} Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.467564 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-log" containerID="cri-o://9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" gracePeriod=30 Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.467636 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-httpd" containerID="cri-o://7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" gracePeriod=30 Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.917996 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee167be5-2cd2-4407-b21b-f6e2bb87ed6c" path="/var/lib/kubelet/pods/ee167be5-2cd2-4407-b21b-f6e2bb87ed6c/volumes" Nov 28 17:43:17 crc kubenswrapper[4909]: I1128 17:43:17.988417 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073523 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073734 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073763 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt74r\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073807 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.073857 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data\") pod \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\" (UID: \"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd\") " Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.074478 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs" (OuterVolumeSpecName: "logs") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.074889 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.075611 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.078676 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r" (OuterVolumeSpecName: "kube-api-access-qt74r") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "kube-api-access-qt74r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.079550 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph" (OuterVolumeSpecName: "ceph") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.087977 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts" (OuterVolumeSpecName: "scripts") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.103002 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.131905 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data" (OuterVolumeSpecName: "config-data") pod "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" (UID: "8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176833 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176864 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt74r\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-kube-api-access-qt74r\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176877 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176889 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176899 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.176907 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.478135 4909 generic.go:334] "Generic (PLEG): container finished" podID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerID="7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" exitCode=0 Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.478513 4909 generic.go:334] "Generic (PLEG): container finished" podID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerID="9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" exitCode=143 Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.478190 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.478207 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerDied","Data":"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a"} Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.479676 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerDied","Data":"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882"} Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.479695 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd","Type":"ContainerDied","Data":"ed37a0d4a9070679f12928729168efb03856b41e233ac1e26697b0dab26d42d4"} Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.479714 4909 scope.go:117] "RemoveContainer" containerID="7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.484202 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerStarted","Data":"322e0616f54341d91165dfa4c57a0977fe42661c774e7cc09684d3743a0e28cb"} Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.484258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerStarted","Data":"28ecd248bb264123a78903ef56997b7630ddb7524a3f36eb3f76971ca78e3127"} Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.536211 4909 scope.go:117] "RemoveContainer" containerID="9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.542283 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=2.542260171 podStartE2EDuration="2.542260171s" podCreationTimestamp="2025-11-28 17:43:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:18.502912863 +0000 UTC m=+5580.899597387" watchObservedRunningTime="2025-11-28 17:43:18.542260171 +0000 UTC m=+5580.938944735" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.552196 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.572121 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.586726 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:18 crc kubenswrapper[4909]: E1128 17:43:18.587191 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-log" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.587208 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-log" Nov 28 17:43:18 crc kubenswrapper[4909]: E1128 17:43:18.587230 4909 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-httpd" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.587238 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-httpd" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.587488 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-log" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.587507 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" containerName="glance-httpd" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.588673 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.590960 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.594679 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.600846 4909 scope.go:117] "RemoveContainer" containerID="7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" Nov 28 17:43:18 crc kubenswrapper[4909]: E1128 17:43:18.601296 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a\": container with ID starting with 7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a not found: ID does not exist" containerID="7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.601332 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a"} err="failed to get container status \"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a\": rpc error: code = NotFound desc = could not find container \"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a\": container with ID starting with 7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a not found: ID does not exist" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.601358 4909 scope.go:117] "RemoveContainer" containerID="9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" Nov 28 17:43:18 crc kubenswrapper[4909]: E1128 17:43:18.601821 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882\": container with ID starting with 9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882 not found: ID does not exist" containerID="9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.601851 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882"} err="failed to get container status \"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882\": rpc error: code = NotFound desc = could not find container 
\"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882\": container with ID starting with 9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882 not found: ID does not exist" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.601874 4909 scope.go:117] "RemoveContainer" containerID="7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.602200 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a"} err="failed to get container status \"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a\": rpc error: code = NotFound desc = could not find container \"7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a\": container with ID starting with 7e85edfa127c99c4c3b905e157346aee56db33203f1a6adcd349962850e64a7a not found: ID does not exist" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.602221 4909 scope.go:117] "RemoveContainer" containerID="9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.603798 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882"} err="failed to get container status \"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882\": rpc error: code = NotFound desc = could not find container \"9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882\": container with ID starting with 9cf0002534800de15e04efa1e7bb74aee8730f31f9dc4294b39a34fcf44c2882 not found: ID does not exist" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686234 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686327 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686371 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjgjj\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686402 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686602 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.686685 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788638 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788719 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788770 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjgjj\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788799 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788838 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788900 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.788929 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data\") 
pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.790035 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.790260 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.793285 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.793876 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.793970 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.799820 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.819377 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjgjj\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj\") pod \"glance-default-internal-api-0\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:43:18 crc kubenswrapper[4909]: I1128 17:43:18.904624 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:19 crc kubenswrapper[4909]: I1128 17:43:19.487126 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:43:19 crc kubenswrapper[4909]: W1128 17:43:19.490420 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7aa429fe_eb79_40cb_9612_931f5b0e2b54.slice/crio-07e6dd46d956155726250cfb303e4a6a9ab5911d74ec23ac82ec22601f9a1e72 WatchSource:0}: Error finding container 07e6dd46d956155726250cfb303e4a6a9ab5911d74ec23ac82ec22601f9a1e72: Status 404 returned error can't find the container with id 07e6dd46d956155726250cfb303e4a6a9ab5911d74ec23ac82ec22601f9a1e72 Nov 28 17:43:19 crc kubenswrapper[4909]: I1128 17:43:19.912872 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd" path="/var/lib/kubelet/pods/8aa469b4-b3b2-48aa-82b4-7d6bdfd5cabd/volumes" Nov 28 17:43:20 crc kubenswrapper[4909]: I1128 17:43:20.508468 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerStarted","Data":"db86491c36d28334f4760b1a2499df8201d87e9374ffb080e7f82a02d4233608"} Nov 28 17:43:20 crc kubenswrapper[4909]: I1128 17:43:20.509349 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerStarted","Data":"1aa6258fe04ef4d341553ed1ab2bebe7218d2d91ca983ecc85033b22b76f9b05"} Nov 28 17:43:20 crc kubenswrapper[4909]: I1128 17:43:20.509418 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerStarted","Data":"07e6dd46d956155726250cfb303e4a6a9ab5911d74ec23ac82ec22601f9a1e72"} Nov 28 17:43:20 crc kubenswrapper[4909]: I1128 17:43:20.527446 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=2.527406021 podStartE2EDuration="2.527406021s" podCreationTimestamp="2025-11-28 17:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:20.525800908 +0000 UTC m=+5582.922485432" watchObservedRunningTime="2025-11-28 17:43:20.527406021 +0000 UTC m=+5582.924090555" Nov 28 17:43:23 crc kubenswrapper[4909]: I1128 17:43:23.139735 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:23 crc kubenswrapper[4909]: I1128 17:43:23.213062 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"] Nov 28 17:43:23 crc kubenswrapper[4909]: I1128 17:43:23.213337 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="dnsmasq-dns" containerID="cri-o://caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753" gracePeriod=10 Nov 28 17:43:23 crc kubenswrapper[4909]: I1128 17:43:23.995234 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.082229 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-sb\") pod \"046651a0-5bd3-47ec-91a9-f45f86868e24\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.082341 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhb67\" (UniqueName: \"kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67\") pod \"046651a0-5bd3-47ec-91a9-f45f86868e24\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.082459 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config\") pod \"046651a0-5bd3-47ec-91a9-f45f86868e24\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.082526 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb\") pod \"046651a0-5bd3-47ec-91a9-f45f86868e24\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.083216 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc\") pod \"046651a0-5bd3-47ec-91a9-f45f86868e24\" (UID: \"046651a0-5bd3-47ec-91a9-f45f86868e24\") " Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.089886 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67" (OuterVolumeSpecName: "kube-api-access-mhb67") pod "046651a0-5bd3-47ec-91a9-f45f86868e24" (UID: "046651a0-5bd3-47ec-91a9-f45f86868e24"). InnerVolumeSpecName "kube-api-access-mhb67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.122600 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "046651a0-5bd3-47ec-91a9-f45f86868e24" (UID: "046651a0-5bd3-47ec-91a9-f45f86868e24"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.124960 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "046651a0-5bd3-47ec-91a9-f45f86868e24" (UID: "046651a0-5bd3-47ec-91a9-f45f86868e24"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.125394 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "046651a0-5bd3-47ec-91a9-f45f86868e24" (UID: "046651a0-5bd3-47ec-91a9-f45f86868e24"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.138442 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config" (OuterVolumeSpecName: "config") pod "046651a0-5bd3-47ec-91a9-f45f86868e24" (UID: "046651a0-5bd3-47ec-91a9-f45f86868e24"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.184826 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.184863 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.184873 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.184881 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/046651a0-5bd3-47ec-91a9-f45f86868e24-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.184893 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhb67\" (UniqueName: \"kubernetes.io/projected/046651a0-5bd3-47ec-91a9-f45f86868e24-kube-api-access-mhb67\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.564630 4909 generic.go:334] "Generic (PLEG): container finished" podID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerID="caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753" exitCode=0 Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.564703 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerDied","Data":"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753"} Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.564744 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.564778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bdc76b79f-jsx6s" event={"ID":"046651a0-5bd3-47ec-91a9-f45f86868e24","Type":"ContainerDied","Data":"8f0abaa9031ebec09ba87d5aee6ac7425e8a0512056a6ca13027c065aa73bcf3"} Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.564820 4909 scope.go:117] "RemoveContainer" containerID="caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.601850 4909 scope.go:117] "RemoveContainer" containerID="eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.606781 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"] Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.613200 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bdc76b79f-jsx6s"] Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.625555 4909 scope.go:117] "RemoveContainer" containerID="caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753" Nov 28 17:43:24 crc kubenswrapper[4909]: E1128 17:43:24.626057 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753\": container with ID starting with caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753 not found: ID does not exist" containerID="caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.626099 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753"} err="failed to get container status \"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753\": rpc error: code = NotFound desc = could not find container \"caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753\": container with ID starting with caf745226829359899be0c347e6decc4f5d79c18b6d81d31427369fc6e70d753 not found: ID does not exist" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.626126 4909 scope.go:117] "RemoveContainer" containerID="eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c" Nov 28 17:43:24 crc kubenswrapper[4909]: E1128 17:43:24.626562 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c\": container with ID starting with eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c not found: ID does not exist" containerID="eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c" Nov 28 17:43:24 crc kubenswrapper[4909]: I1128 17:43:24.626591 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c"} err="failed to get container status \"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c\": rpc error: code = NotFound desc = could not find container \"eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c\": container with ID starting with eb9d133c9e9598660a3940e51effd3c62dda2f739a977e2fe4b2b8e55380d63c not found: ID does not exist" Nov 28 17:43:25 
crc kubenswrapper[4909]: I1128 17:43:25.912565 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" path="/var/lib/kubelet/pods/046651a0-5bd3-47ec-91a9-f45f86868e24/volumes" Nov 28 17:43:26 crc kubenswrapper[4909]: I1128 17:43:26.866799 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 17:43:26 crc kubenswrapper[4909]: I1128 17:43:26.866857 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 17:43:26 crc kubenswrapper[4909]: I1128 17:43:26.918936 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 17:43:26 crc kubenswrapper[4909]: I1128 17:43:26.947539 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 17:43:27 crc kubenswrapper[4909]: I1128 17:43:27.600602 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 17:43:27 crc kubenswrapper[4909]: I1128 17:43:27.600987 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 17:43:27 crc kubenswrapper[4909]: I1128 17:43:27.916113 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45" Nov 28 17:43:28 crc kubenswrapper[4909]: I1128 17:43:28.615016 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09"} Nov 28 17:43:28 crc kubenswrapper[4909]: I1128 17:43:28.905003 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:28 crc kubenswrapper[4909]: I1128 17:43:28.905315 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:28 crc kubenswrapper[4909]: I1128 17:43:28.934235 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:28 crc kubenswrapper[4909]: I1128 17:43:28.955795 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:29 crc kubenswrapper[4909]: I1128 17:43:29.471153 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 17:43:29 crc kubenswrapper[4909]: I1128 17:43:29.579848 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 17:43:29 crc kubenswrapper[4909]: I1128 17:43:29.633398 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:29 crc kubenswrapper[4909]: I1128 17:43:29.633713 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:31 crc kubenswrapper[4909]: I1128 17:43:31.650377 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 17:43:31 crc kubenswrapper[4909]: I1128 17:43:31.650750 4909 prober_manager.go:312] "Failed to trigger a 
manual run" probe="Readiness" Nov 28 17:43:31 crc kubenswrapper[4909]: I1128 17:43:31.714347 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:31 crc kubenswrapper[4909]: I1128 17:43:31.726353 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.737366 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-vq4q8"] Nov 28 17:43:37 crc kubenswrapper[4909]: E1128 17:43:37.738336 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="dnsmasq-dns" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.738351 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="dnsmasq-dns" Nov 28 17:43:37 crc kubenswrapper[4909]: E1128 17:43:37.738376 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="init" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.738384 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="init" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.738589 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="046651a0-5bd3-47ec-91a9-f45f86868e24" containerName="dnsmasq-dns" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.739286 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.770897 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-d22a-account-create-update-4vb8l"] Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.776312 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.786320 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.787621 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vq4q8"] Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.799984 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d22a-account-create-update-4vb8l"] Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.841400 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdcgt\" (UniqueName: \"kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.841464 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.841497 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt9ws\" (UniqueName: \"kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.841533 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.942720 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.942772 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt9ws\" (UniqueName: \"kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.942803 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 
17:43:37.942922 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdcgt\" (UniqueName: \"kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.943608 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.943681 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.960931 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt9ws\" (UniqueName: \"kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws\") pod \"placement-db-create-vq4q8\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:37 crc kubenswrapper[4909]: I1128 17:43:37.962222 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdcgt\" (UniqueName: \"kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt\") pod \"placement-d22a-account-create-update-4vb8l\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.074971 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.106606 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.524039 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vq4q8"] Nov 28 17:43:38 crc kubenswrapper[4909]: W1128 17:43:38.528385 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68825d63_727c_4c82_a876_6b85b0224061.slice/crio-31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342 WatchSource:0}: Error finding container 31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342: Status 404 returned error can't find the container with id 31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342 Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.612097 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d22a-account-create-update-4vb8l"] Nov 28 17:43:38 crc kubenswrapper[4909]: W1128 17:43:38.614638 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80c89465_bce9_4402_98a8_11da00a3cb88.slice/crio-79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9 WatchSource:0}: Error finding container 79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9: Status 404 returned error can't find the container with id 79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9 Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.712824 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d22a-account-create-update-4vb8l" event={"ID":"80c89465-bce9-4402-98a8-11da00a3cb88","Type":"ContainerStarted","Data":"79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9"} Nov 28 17:43:38 crc kubenswrapper[4909]: I1128 17:43:38.715091 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vq4q8" event={"ID":"68825d63-727c-4c82-a876-6b85b0224061","Type":"ContainerStarted","Data":"31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342"} Nov 28 17:43:39 crc kubenswrapper[4909]: I1128 17:43:39.724922 4909 generic.go:334] "Generic (PLEG): container finished" podID="68825d63-727c-4c82-a876-6b85b0224061" containerID="168caa014f44cb5b9c04bdf92347bcd35ab22cf92dafebb1b734eed08be00a23" exitCode=0 Nov 28 17:43:39 crc kubenswrapper[4909]: I1128 17:43:39.725027 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vq4q8" event={"ID":"68825d63-727c-4c82-a876-6b85b0224061","Type":"ContainerDied","Data":"168caa014f44cb5b9c04bdf92347bcd35ab22cf92dafebb1b734eed08be00a23"} Nov 28 17:43:39 crc kubenswrapper[4909]: I1128 17:43:39.726518 4909 generic.go:334] "Generic (PLEG): container finished" podID="80c89465-bce9-4402-98a8-11da00a3cb88" containerID="9abebcbda14a8084e514996dec7508ff98ad5142765c975ba661e3c2d6e6cee5" exitCode=0 Nov 28 17:43:39 crc kubenswrapper[4909]: I1128 17:43:39.726554 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d22a-account-create-update-4vb8l" event={"ID":"80c89465-bce9-4402-98a8-11da00a3cb88","Type":"ContainerDied","Data":"9abebcbda14a8084e514996dec7508ff98ad5142765c975ba661e3c2d6e6cee5"} Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.117600 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.119777 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.198318 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xt9ws\" (UniqueName: \"kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws\") pod \"68825d63-727c-4c82-a876-6b85b0224061\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.198378 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts\") pod \"68825d63-727c-4c82-a876-6b85b0224061\" (UID: \"68825d63-727c-4c82-a876-6b85b0224061\") " Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.198564 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdcgt\" (UniqueName: \"kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt\") pod \"80c89465-bce9-4402-98a8-11da00a3cb88\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.198649 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts\") pod \"80c89465-bce9-4402-98a8-11da00a3cb88\" (UID: \"80c89465-bce9-4402-98a8-11da00a3cb88\") " Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.199183 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "68825d63-727c-4c82-a876-6b85b0224061" (UID: "68825d63-727c-4c82-a876-6b85b0224061"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.199435 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "80c89465-bce9-4402-98a8-11da00a3cb88" (UID: "80c89465-bce9-4402-98a8-11da00a3cb88"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.204860 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws" (OuterVolumeSpecName: "kube-api-access-xt9ws") pod "68825d63-727c-4c82-a876-6b85b0224061" (UID: "68825d63-727c-4c82-a876-6b85b0224061"). InnerVolumeSpecName "kube-api-access-xt9ws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.204907 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt" (OuterVolumeSpecName: "kube-api-access-jdcgt") pod "80c89465-bce9-4402-98a8-11da00a3cb88" (UID: "80c89465-bce9-4402-98a8-11da00a3cb88"). InnerVolumeSpecName "kube-api-access-jdcgt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.300977 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdcgt\" (UniqueName: \"kubernetes.io/projected/80c89465-bce9-4402-98a8-11da00a3cb88-kube-api-access-jdcgt\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.301005 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80c89465-bce9-4402-98a8-11da00a3cb88-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.301014 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xt9ws\" (UniqueName: \"kubernetes.io/projected/68825d63-727c-4c82-a876-6b85b0224061-kube-api-access-xt9ws\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.301024 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68825d63-727c-4c82-a876-6b85b0224061-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.748477 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vq4q8" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.748477 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vq4q8" event={"ID":"68825d63-727c-4c82-a876-6b85b0224061","Type":"ContainerDied","Data":"31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342"} Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.748683 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31f109918da41896e2ba7cc13fc69609e391f512df1c812f6e0714f2aa7fb342" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.753338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d22a-account-create-update-4vb8l" event={"ID":"80c89465-bce9-4402-98a8-11da00a3cb88","Type":"ContainerDied","Data":"79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9"} Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.753603 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79a895f29bdca3c1645db1daa444ee09c0648f3ba070450f7876931ea9ad6cb9" Nov 28 17:43:41 crc kubenswrapper[4909]: I1128 17:43:41.753351 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d22a-account-create-update-4vb8l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.064009 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:43:43 crc kubenswrapper[4909]: E1128 17:43:43.064734 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80c89465-bce9-4402-98a8-11da00a3cb88" containerName="mariadb-account-create-update" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.064752 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="80c89465-bce9-4402-98a8-11da00a3cb88" containerName="mariadb-account-create-update" Nov 28 17:43:43 crc kubenswrapper[4909]: E1128 17:43:43.064762 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68825d63-727c-4c82-a876-6b85b0224061" containerName="mariadb-database-create" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.064770 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="68825d63-727c-4c82-a876-6b85b0224061" containerName="mariadb-database-create" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.065027 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="68825d63-727c-4c82-a876-6b85b0224061" containerName="mariadb-database-create" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.065047 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="80c89465-bce9-4402-98a8-11da00a3cb88" containerName="mariadb-account-create-update" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.066264 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.094293 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.133347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjz95\" (UniqueName: \"kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.133423 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.133481 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.133529 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.133562 
4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.164430 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-2vp5l"] Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.165948 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.169499 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-dv5kk" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.169763 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.170990 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.173477 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2vp5l"] Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235350 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235422 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235455 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mwlz\" (UniqueName: \"kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235573 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjz95\" (UniqueName: \"kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235743 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb\") pod 
\"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235813 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235837 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235860 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.235910 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.236300 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.236451 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.236509 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.236815 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.271245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjz95\" (UniqueName: \"kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95\") pod \"dnsmasq-dns-5dfd646bd7-l24xd\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " 
pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.337239 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.337616 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.337634 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.337676 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.337702 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mwlz\" (UniqueName: \"kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.338457 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.341524 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.341735 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.342168 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.353443 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mwlz\" (UniqueName: 
\"kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz\") pod \"placement-db-sync-2vp5l\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.393160 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.492168 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.860518 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:43:43 crc kubenswrapper[4909]: W1128 17:43:43.867340 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bc8ecfe_e00f_420e_870a_a0f4a9b8678f.slice/crio-b964510354809875e414c6f26a04abe5aced8c475d59a41c75cb40126f46093b WatchSource:0}: Error finding container b964510354809875e414c6f26a04abe5aced8c475d59a41c75cb40126f46093b: Status 404 returned error can't find the container with id b964510354809875e414c6f26a04abe5aced8c475d59a41c75cb40126f46093b Nov 28 17:43:43 crc kubenswrapper[4909]: I1128 17:43:43.978953 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2vp5l"] Nov 28 17:43:43 crc kubenswrapper[4909]: W1128 17:43:43.983761 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf35bbae0_db18_4b8a_924e_18e33ad585e2.slice/crio-eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7 WatchSource:0}: Error finding container eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7: Status 404 returned error can't find the container with id eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7 Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.782636 4909 generic.go:334] "Generic (PLEG): container finished" podID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerID="64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06" exitCode=0 Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.782708 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" event={"ID":"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f","Type":"ContainerDied","Data":"64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06"} Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.783021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" event={"ID":"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f","Type":"ContainerStarted","Data":"b964510354809875e414c6f26a04abe5aced8c475d59a41c75cb40126f46093b"} Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.784873 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2vp5l" event={"ID":"f35bbae0-db18-4b8a-924e-18e33ad585e2","Type":"ContainerStarted","Data":"82cfeda0f7892f3fd3e81c1ffe3b3599a0185a6ef7d13388c7ad34860ae58818"} Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.784936 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2vp5l" event={"ID":"f35bbae0-db18-4b8a-924e-18e33ad585e2","Type":"ContainerStarted","Data":"eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7"} Nov 28 17:43:44 crc kubenswrapper[4909]: I1128 17:43:44.824254 4909 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-2vp5l" podStartSLOduration=1.82423457 podStartE2EDuration="1.82423457s" podCreationTimestamp="2025-11-28 17:43:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:44.814804076 +0000 UTC m=+5607.211488620" watchObservedRunningTime="2025-11-28 17:43:44.82423457 +0000 UTC m=+5607.220919094" Nov 28 17:43:45 crc kubenswrapper[4909]: E1128 17:43:45.311772 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf35bbae0_db18_4b8a_924e_18e33ad585e2.slice/crio-conmon-82cfeda0f7892f3fd3e81c1ffe3b3599a0185a6ef7d13388c7ad34860ae58818.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:43:45 crc kubenswrapper[4909]: I1128 17:43:45.798335 4909 generic.go:334] "Generic (PLEG): container finished" podID="f35bbae0-db18-4b8a-924e-18e33ad585e2" containerID="82cfeda0f7892f3fd3e81c1ffe3b3599a0185a6ef7d13388c7ad34860ae58818" exitCode=0 Nov 28 17:43:45 crc kubenswrapper[4909]: I1128 17:43:45.798549 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2vp5l" event={"ID":"f35bbae0-db18-4b8a-924e-18e33ad585e2","Type":"ContainerDied","Data":"82cfeda0f7892f3fd3e81c1ffe3b3599a0185a6ef7d13388c7ad34860ae58818"} Nov 28 17:43:45 crc kubenswrapper[4909]: I1128 17:43:45.801203 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" event={"ID":"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f","Type":"ContainerStarted","Data":"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9"} Nov 28 17:43:45 crc kubenswrapper[4909]: I1128 17:43:45.801362 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:45 crc kubenswrapper[4909]: I1128 17:43:45.855377 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" podStartSLOduration=2.8553485910000003 podStartE2EDuration="2.855348591s" podCreationTimestamp="2025-11-28 17:43:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:45.839483885 +0000 UTC m=+5608.236168419" watchObservedRunningTime="2025-11-28 17:43:45.855348591 +0000 UTC m=+5608.252033155" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.182215 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.255726 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs\") pod \"f35bbae0-db18-4b8a-924e-18e33ad585e2\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.256067 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts\") pod \"f35bbae0-db18-4b8a-924e-18e33ad585e2\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.256263 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle\") pod \"f35bbae0-db18-4b8a-924e-18e33ad585e2\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.256276 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs" (OuterVolumeSpecName: "logs") pod "f35bbae0-db18-4b8a-924e-18e33ad585e2" (UID: "f35bbae0-db18-4b8a-924e-18e33ad585e2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.256572 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mwlz\" (UniqueName: \"kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz\") pod \"f35bbae0-db18-4b8a-924e-18e33ad585e2\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.256789 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data\") pod \"f35bbae0-db18-4b8a-924e-18e33ad585e2\" (UID: \"f35bbae0-db18-4b8a-924e-18e33ad585e2\") " Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.257278 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f35bbae0-db18-4b8a-924e-18e33ad585e2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.262728 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz" (OuterVolumeSpecName: "kube-api-access-6mwlz") pod "f35bbae0-db18-4b8a-924e-18e33ad585e2" (UID: "f35bbae0-db18-4b8a-924e-18e33ad585e2"). InnerVolumeSpecName "kube-api-access-6mwlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.264874 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts" (OuterVolumeSpecName: "scripts") pod "f35bbae0-db18-4b8a-924e-18e33ad585e2" (UID: "f35bbae0-db18-4b8a-924e-18e33ad585e2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.288250 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f35bbae0-db18-4b8a-924e-18e33ad585e2" (UID: "f35bbae0-db18-4b8a-924e-18e33ad585e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.305140 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data" (OuterVolumeSpecName: "config-data") pod "f35bbae0-db18-4b8a-924e-18e33ad585e2" (UID: "f35bbae0-db18-4b8a-924e-18e33ad585e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.361093 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.361142 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.361161 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mwlz\" (UniqueName: \"kubernetes.io/projected/f35bbae0-db18-4b8a-924e-18e33ad585e2-kube-api-access-6mwlz\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.361176 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35bbae0-db18-4b8a-924e-18e33ad585e2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.829085 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2vp5l" event={"ID":"f35bbae0-db18-4b8a-924e-18e33ad585e2","Type":"ContainerDied","Data":"eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7"} Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.829415 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eeea167a5acaa79eb657f2ba856ddc6af466409c1249cf0a725a284b53c3b4c7" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.829150 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-2vp5l" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.933143 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5bd68f754d-s54pf"] Nov 28 17:43:47 crc kubenswrapper[4909]: E1128 17:43:47.933617 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f35bbae0-db18-4b8a-924e-18e33ad585e2" containerName="placement-db-sync" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.933642 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f35bbae0-db18-4b8a-924e-18e33ad585e2" containerName="placement-db-sync" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.933898 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f35bbae0-db18-4b8a-924e-18e33ad585e2" containerName="placement-db-sync" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.935062 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.940332 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.940639 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.941006 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-dv5kk" Nov 28 17:43:47 crc kubenswrapper[4909]: I1128 17:43:47.949784 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bd68f754d-s54pf"] Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.073881 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qlhk\" (UniqueName: \"kubernetes.io/projected/a1650cdd-5e8c-4c95-b414-2931314725d2-kube-api-access-9qlhk\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.073925 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1650cdd-5e8c-4c95-b414-2931314725d2-logs\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.073961 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-config-data\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.074004 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-combined-ca-bundle\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.074028 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-scripts\") pod 
\"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qlhk\" (UniqueName: \"kubernetes.io/projected/a1650cdd-5e8c-4c95-b414-2931314725d2-kube-api-access-9qlhk\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175400 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1650cdd-5e8c-4c95-b414-2931314725d2-logs\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175447 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-config-data\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175502 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-combined-ca-bundle\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175528 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-scripts\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.175914 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1650cdd-5e8c-4c95-b414-2931314725d2-logs\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.181377 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-combined-ca-bundle\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.181583 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-scripts\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.185266 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1650cdd-5e8c-4c95-b414-2931314725d2-config-data\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.191559 
4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qlhk\" (UniqueName: \"kubernetes.io/projected/a1650cdd-5e8c-4c95-b414-2931314725d2-kube-api-access-9qlhk\") pod \"placement-5bd68f754d-s54pf\" (UID: \"a1650cdd-5e8c-4c95-b414-2931314725d2\") " pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.300500 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.765039 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bd68f754d-s54pf"] Nov 28 17:43:48 crc kubenswrapper[4909]: W1128 17:43:48.772775 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1650cdd_5e8c_4c95_b414_2931314725d2.slice/crio-4bfdcbc25665e449fc6ec6be454d716647374982a81b7e33a6d69c67dfe345e5 WatchSource:0}: Error finding container 4bfdcbc25665e449fc6ec6be454d716647374982a81b7e33a6d69c67dfe345e5: Status 404 returned error can't find the container with id 4bfdcbc25665e449fc6ec6be454d716647374982a81b7e33a6d69c67dfe345e5 Nov 28 17:43:48 crc kubenswrapper[4909]: I1128 17:43:48.839333 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bd68f754d-s54pf" event={"ID":"a1650cdd-5e8c-4c95-b414-2931314725d2","Type":"ContainerStarted","Data":"4bfdcbc25665e449fc6ec6be454d716647374982a81b7e33a6d69c67dfe345e5"} Nov 28 17:43:49 crc kubenswrapper[4909]: I1128 17:43:49.847078 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bd68f754d-s54pf" event={"ID":"a1650cdd-5e8c-4c95-b414-2931314725d2","Type":"ContainerStarted","Data":"486c627013fd0316ec6705480e87b402bf1d52a1e580309129c8eb8270d8300b"} Nov 28 17:43:49 crc kubenswrapper[4909]: I1128 17:43:49.848497 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bd68f754d-s54pf" event={"ID":"a1650cdd-5e8c-4c95-b414-2931314725d2","Type":"ContainerStarted","Data":"f785cf165025bedbe62b90a29f109b56356f8e90e418d139a8856b7079b8283f"} Nov 28 17:43:49 crc kubenswrapper[4909]: I1128 17:43:49.849675 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:49 crc kubenswrapper[4909]: I1128 17:43:49.849705 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:43:49 crc kubenswrapper[4909]: I1128 17:43:49.873911 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5bd68f754d-s54pf" podStartSLOduration=2.873864196 podStartE2EDuration="2.873864196s" podCreationTimestamp="2025-11-28 17:43:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:43:49.873229559 +0000 UTC m=+5612.269914083" watchObservedRunningTime="2025-11-28 17:43:49.873864196 +0000 UTC m=+5612.270548720" Nov 28 17:43:53 crc kubenswrapper[4909]: I1128 17:43:53.395835 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:43:53 crc kubenswrapper[4909]: I1128 17:43:53.503141 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:53 crc kubenswrapper[4909]: I1128 17:43:53.504604 4909 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="dnsmasq-dns" containerID="cri-o://d50e36c9ca6fbf1eee072a7a1b478677c3d8c0fee9b90731fa1be00667be6a4e" gracePeriod=10 Nov 28 17:43:53 crc kubenswrapper[4909]: I1128 17:43:53.920472 4909 generic.go:334] "Generic (PLEG): container finished" podID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerID="d50e36c9ca6fbf1eee072a7a1b478677c3d8c0fee9b90731fa1be00667be6a4e" exitCode=0 Nov 28 17:43:53 crc kubenswrapper[4909]: I1128 17:43:53.923374 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" event={"ID":"435e59f3-911f-41f4-8f2d-cdc24a55c38e","Type":"ContainerDied","Data":"d50e36c9ca6fbf1eee072a7a1b478677c3d8c0fee9b90731fa1be00667be6a4e"} Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.005069 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.091827 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7vfj\" (UniqueName: \"kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj\") pod \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.091886 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config\") pod \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.091946 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb\") pod \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.091972 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc\") pod \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.092000 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb\") pod \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\" (UID: \"435e59f3-911f-41f4-8f2d-cdc24a55c38e\") " Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.101000 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj" (OuterVolumeSpecName: "kube-api-access-f7vfj") pod "435e59f3-911f-41f4-8f2d-cdc24a55c38e" (UID: "435e59f3-911f-41f4-8f2d-cdc24a55c38e"). InnerVolumeSpecName "kube-api-access-f7vfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.135257 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "435e59f3-911f-41f4-8f2d-cdc24a55c38e" (UID: "435e59f3-911f-41f4-8f2d-cdc24a55c38e"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.138852 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "435e59f3-911f-41f4-8f2d-cdc24a55c38e" (UID: "435e59f3-911f-41f4-8f2d-cdc24a55c38e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.140058 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "435e59f3-911f-41f4-8f2d-cdc24a55c38e" (UID: "435e59f3-911f-41f4-8f2d-cdc24a55c38e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.146642 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config" (OuterVolumeSpecName: "config") pod "435e59f3-911f-41f4-8f2d-cdc24a55c38e" (UID: "435e59f3-911f-41f4-8f2d-cdc24a55c38e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.195161 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.195195 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7vfj\" (UniqueName: \"kubernetes.io/projected/435e59f3-911f-41f4-8f2d-cdc24a55c38e-kube-api-access-f7vfj\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.195207 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.195217 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.195227 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/435e59f3-911f-41f4-8f2d-cdc24a55c38e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.934067 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" event={"ID":"435e59f3-911f-41f4-8f2d-cdc24a55c38e","Type":"ContainerDied","Data":"6318fbaac04bde7c29d017f911ad2428c901619b56669540b432e4c25cdc3dff"} Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.934391 4909 scope.go:117] "RemoveContainer" containerID="d50e36c9ca6fbf1eee072a7a1b478677c3d8c0fee9b90731fa1be00667be6a4e" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.934176 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64d4964cf7-xrqbj" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.973613 4909 scope.go:117] "RemoveContainer" containerID="d9ed563b6335561f0a80e5fbdbfe73da4c165e713bff023e29138a70c60ca7e6" Nov 28 17:43:54 crc kubenswrapper[4909]: I1128 17:43:54.992403 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:55 crc kubenswrapper[4909]: I1128 17:43:55.005961 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64d4964cf7-xrqbj"] Nov 28 17:43:55 crc kubenswrapper[4909]: I1128 17:43:55.915805 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" path="/var/lib/kubelet/pods/435e59f3-911f-41f4-8f2d-cdc24a55c38e/volumes" Nov 28 17:44:19 crc kubenswrapper[4909]: I1128 17:44:19.264259 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:44:19 crc kubenswrapper[4909]: I1128 17:44:19.315221 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bd68f754d-s54pf" Nov 28 17:44:32 crc kubenswrapper[4909]: I1128 17:44:32.090718 4909 scope.go:117] "RemoveContainer" containerID="90255f395c207fb48cbd301c57bd28118c5945ce5f5d995175d976577870855f" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.852643 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-rpjjz"] Nov 28 17:44:43 crc kubenswrapper[4909]: E1128 17:44:43.853773 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="init" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.853789 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="init" Nov 28 17:44:43 crc kubenswrapper[4909]: E1128 17:44:43.853824 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="dnsmasq-dns" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.853832 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="dnsmasq-dns" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.854040 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="435e59f3-911f-41f4-8f2d-cdc24a55c38e" containerName="dnsmasq-dns" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.854781 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.860557 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rpjjz"] Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.955006 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-bcc4g"] Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.956651 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.966150 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-30f7-account-create-update-5k4q7"] Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.967605 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.969420 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.973310 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-bcc4g"] Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.979614 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-30f7-account-create-update-5k4q7"] Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.982279 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:43 crc kubenswrapper[4909]: I1128 17:44:43.982447 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz6hh\" (UniqueName: \"kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz6hh\" (UniqueName: \"kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085588 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085670 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085725 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085747 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkh69\" (UniqueName: \"kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.085795 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcgn9\" (UniqueName: \"kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.086491 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.111605 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz6hh\" (UniqueName: \"kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh\") pod \"nova-api-db-create-rpjjz\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.165094 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-rznwv"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.166274 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.179257 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-277a-account-create-update-j5vxd"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.180316 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.183746 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.183880 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.187199 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.187240 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkh69\" (UniqueName: \"kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.187279 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcgn9\" (UniqueName: \"kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.187325 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.187991 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.188854 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.221518 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-rznwv"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.235058 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcgn9\" (UniqueName: \"kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9\") pod \"nova-api-30f7-account-create-update-5k4q7\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.236254 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkh69\" (UniqueName: \"kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69\") pod \"nova-cell0-db-create-bcc4g\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.244722 4909 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-277a-account-create-update-j5vxd"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.289635 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbkd5\" (UniqueName: \"kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.289707 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mftzt\" (UniqueName: \"kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.289742 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.289821 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.314065 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.326956 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.372257 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-a66f-account-create-update-b8qxp"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.373479 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.376292 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.390740 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a66f-account-create-update-b8qxp"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.391898 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.391942 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tf76g\" (UniqueName: \"kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.392030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.392095 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbkd5\" (UniqueName: \"kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.392125 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.392160 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mftzt\" (UniqueName: \"kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.393194 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.393807 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.409452 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbkd5\" (UniqueName: \"kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5\") pod \"nova-cell0-277a-account-create-update-j5vxd\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.410966 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mftzt\" (UniqueName: \"kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt\") pod \"nova-cell1-db-create-rznwv\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.493300 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.494040 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tf76g\" (UniqueName: \"kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.494585 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.498827 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.509348 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf76g\" (UniqueName: \"kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g\") pod \"nova-cell1-a66f-account-create-update-b8qxp\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.592138 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.719319 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rpjjz"] Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.742983 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.824147 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-30f7-account-create-update-5k4q7"] Nov 28 17:44:44 crc kubenswrapper[4909]: W1128 17:44:44.836295 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1611532c_c609_460f_9376_7767f8caebec.slice/crio-4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194 WatchSource:0}: Error finding container 4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194: Status 404 returned error can't find the container with id 4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194 Nov 28 17:44:44 crc kubenswrapper[4909]: I1128 17:44:44.849752 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-bcc4g"] Nov 28 17:44:44 crc kubenswrapper[4909]: W1128 17:44:44.855373 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f469cb4_de41_446a_b59c_6c7db8411f55.slice/crio-a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061 WatchSource:0}: Error finding container a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061: Status 404 returned error can't find the container with id a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061 Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.002461 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-rznwv"] Nov 28 17:44:45 crc kubenswrapper[4909]: W1128 17:44:45.004533 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod202367de_7bd5_4d73_8d87_536428b3638b.slice/crio-9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa WatchSource:0}: Error finding container 9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa: Status 404 returned error can't find the container with id 9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.173598 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-277a-account-create-update-j5vxd"] Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.242537 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a66f-account-create-update-b8qxp"] Nov 28 17:44:45 crc kubenswrapper[4909]: W1128 17:44:45.265241 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00679f33_a115_4278_b8dd_8515ab78adce.slice/crio-74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b WatchSource:0}: Error finding container 74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b: Status 404 returned error can't find the container with id 74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.486715 4909 generic.go:334] "Generic (PLEG): container finished" podID="22081f58-7375-4695-ae44-0d3523be341a" containerID="fee70e379f18321de810331a3c37dc1dc62b18bca6b023997b76552c885b04e4" exitCode=0 Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.486757 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rpjjz" 
event={"ID":"22081f58-7375-4695-ae44-0d3523be341a","Type":"ContainerDied","Data":"fee70e379f18321de810331a3c37dc1dc62b18bca6b023997b76552c885b04e4"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.486793 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rpjjz" event={"ID":"22081f58-7375-4695-ae44-0d3523be341a","Type":"ContainerStarted","Data":"a1f8344024753837e4c87d125090d69400473b88d3457a4693b932972c9f0abc"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.488561 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" event={"ID":"a99f1c9e-ad94-49bc-a382-f0615533a5ac","Type":"ContainerStarted","Data":"4b62c1f3abc8860fc2a0398b19ee82b719e2b035877d5c70f13d146d46f94267"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.491049 4909 generic.go:334] "Generic (PLEG): container finished" podID="1611532c-c609-460f-9376-7767f8caebec" containerID="b0b79c5acff85a28fbd7545fd478661c8c5e8f63ab6124f60e4a425495da8135" exitCode=0 Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.491119 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-30f7-account-create-update-5k4q7" event={"ID":"1611532c-c609-460f-9376-7767f8caebec","Type":"ContainerDied","Data":"b0b79c5acff85a28fbd7545fd478661c8c5e8f63ab6124f60e4a425495da8135"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.491144 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-30f7-account-create-update-5k4q7" event={"ID":"1611532c-c609-460f-9376-7767f8caebec","Type":"ContainerStarted","Data":"4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.492345 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rznwv" event={"ID":"202367de-7bd5-4d73-8d87-536428b3638b","Type":"ContainerStarted","Data":"9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.493751 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" event={"ID":"00679f33-a115-4278-b8dd-8515ab78adce","Type":"ContainerStarted","Data":"74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.496148 4909 generic.go:334] "Generic (PLEG): container finished" podID="6f469cb4-de41-446a-b59c-6c7db8411f55" containerID="60240d1d35142df0c28caf64c807c1eecd8d0da966af14a7e39e9b1e61641f40" exitCode=0 Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.496190 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bcc4g" event={"ID":"6f469cb4-de41-446a-b59c-6c7db8411f55","Type":"ContainerDied","Data":"60240d1d35142df0c28caf64c807c1eecd8d0da966af14a7e39e9b1e61641f40"} Nov 28 17:44:45 crc kubenswrapper[4909]: I1128 17:44:45.496214 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bcc4g" event={"ID":"6f469cb4-de41-446a-b59c-6c7db8411f55","Type":"ContainerStarted","Data":"a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061"} Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.510928 4909 generic.go:334] "Generic (PLEG): container finished" podID="a99f1c9e-ad94-49bc-a382-f0615533a5ac" containerID="2e05a935d700a45c8148b75d65fbc2f4dc3273d480d7d5851e262f75c88325b6" exitCode=0 Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.510995 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" event={"ID":"a99f1c9e-ad94-49bc-a382-f0615533a5ac","Type":"ContainerDied","Data":"2e05a935d700a45c8148b75d65fbc2f4dc3273d480d7d5851e262f75c88325b6"} Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.515742 4909 generic.go:334] "Generic (PLEG): container finished" podID="202367de-7bd5-4d73-8d87-536428b3638b" containerID="c216bd32a45143ab494c44c5d85485b874cbd98b2be1b50013581eb7f65cc07c" exitCode=0 Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.515852 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rznwv" event={"ID":"202367de-7bd5-4d73-8d87-536428b3638b","Type":"ContainerDied","Data":"c216bd32a45143ab494c44c5d85485b874cbd98b2be1b50013581eb7f65cc07c"} Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.518727 4909 generic.go:334] "Generic (PLEG): container finished" podID="00679f33-a115-4278-b8dd-8515ab78adce" containerID="33bed1a3b657e444a60eb507f34cc6e84cda17feacca7ab808bf2aed15bd5975" exitCode=0 Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.518804 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" event={"ID":"00679f33-a115-4278-b8dd-8515ab78adce","Type":"ContainerDied","Data":"33bed1a3b657e444a60eb507f34cc6e84cda17feacca7ab808bf2aed15bd5975"} Nov 28 17:44:46 crc kubenswrapper[4909]: I1128 17:44:46.952568 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.053653 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcgn9\" (UniqueName: \"kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9\") pod \"1611532c-c609-460f-9376-7767f8caebec\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.053936 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts\") pod \"1611532c-c609-460f-9376-7767f8caebec\" (UID: \"1611532c-c609-460f-9376-7767f8caebec\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.054511 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1611532c-c609-460f-9376-7767f8caebec" (UID: "1611532c-c609-460f-9376-7767f8caebec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.059105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9" (OuterVolumeSpecName: "kube-api-access-qcgn9") pod "1611532c-c609-460f-9376-7767f8caebec" (UID: "1611532c-c609-460f-9376-7767f8caebec"). InnerVolumeSpecName "kube-api-access-qcgn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.060026 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.106397 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.154883 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkh69\" (UniqueName: \"kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69\") pod \"6f469cb4-de41-446a-b59c-6c7db8411f55\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.154965 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz6hh\" (UniqueName: \"kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh\") pod \"22081f58-7375-4695-ae44-0d3523be341a\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155001 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts\") pod \"22081f58-7375-4695-ae44-0d3523be341a\" (UID: \"22081f58-7375-4695-ae44-0d3523be341a\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155058 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts\") pod \"6f469cb4-de41-446a-b59c-6c7db8411f55\" (UID: \"6f469cb4-de41-446a-b59c-6c7db8411f55\") " Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155383 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1611532c-c609-460f-9376-7767f8caebec-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155407 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcgn9\" (UniqueName: \"kubernetes.io/projected/1611532c-c609-460f-9376-7767f8caebec-kube-api-access-qcgn9\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155481 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "22081f58-7375-4695-ae44-0d3523be341a" (UID: "22081f58-7375-4695-ae44-0d3523be341a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.155695 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f469cb4-de41-446a-b59c-6c7db8411f55" (UID: "6f469cb4-de41-446a-b59c-6c7db8411f55"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.157921 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh" (OuterVolumeSpecName: "kube-api-access-gz6hh") pod "22081f58-7375-4695-ae44-0d3523be341a" (UID: "22081f58-7375-4695-ae44-0d3523be341a"). 
InnerVolumeSpecName "kube-api-access-gz6hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.158586 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69" (OuterVolumeSpecName: "kube-api-access-kkh69") pod "6f469cb4-de41-446a-b59c-6c7db8411f55" (UID: "6f469cb4-de41-446a-b59c-6c7db8411f55"). InnerVolumeSpecName "kube-api-access-kkh69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.257825 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz6hh\" (UniqueName: \"kubernetes.io/projected/22081f58-7375-4695-ae44-0d3523be341a-kube-api-access-gz6hh\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.257867 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22081f58-7375-4695-ae44-0d3523be341a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.257881 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f469cb4-de41-446a-b59c-6c7db8411f55-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.257895 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkh69\" (UniqueName: \"kubernetes.io/projected/6f469cb4-de41-446a-b59c-6c7db8411f55-kube-api-access-kkh69\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.544815 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bcc4g" event={"ID":"6f469cb4-de41-446a-b59c-6c7db8411f55","Type":"ContainerDied","Data":"a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061"} Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.545207 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a23dca27a35a33047f750d36e09608ef1c4a26a12ba34a52a2ac560261387061" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.544834 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bcc4g" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.551384 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rpjjz" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.551460 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rpjjz" event={"ID":"22081f58-7375-4695-ae44-0d3523be341a","Type":"ContainerDied","Data":"a1f8344024753837e4c87d125090d69400473b88d3457a4693b932972c9f0abc"} Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.551757 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1f8344024753837e4c87d125090d69400473b88d3457a4693b932972c9f0abc" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.558048 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-30f7-account-create-update-5k4q7" event={"ID":"1611532c-c609-460f-9376-7767f8caebec","Type":"ContainerDied","Data":"4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194"} Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.558098 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d2eaf6c38d6f06506af132d75bce06ece960488a3f3982a9a53c16b91d1b194" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.558099 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-30f7-account-create-update-5k4q7" Nov 28 17:44:47 crc kubenswrapper[4909]: I1128 17:44:47.982491 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.116593 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.124386 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.174151 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mftzt\" (UniqueName: \"kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt\") pod \"202367de-7bd5-4d73-8d87-536428b3638b\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.174239 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts\") pod \"202367de-7bd5-4d73-8d87-536428b3638b\" (UID: \"202367de-7bd5-4d73-8d87-536428b3638b\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.174956 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "202367de-7bd5-4d73-8d87-536428b3638b" (UID: "202367de-7bd5-4d73-8d87-536428b3638b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.180369 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt" (OuterVolumeSpecName: "kube-api-access-mftzt") pod "202367de-7bd5-4d73-8d87-536428b3638b" (UID: "202367de-7bd5-4d73-8d87-536428b3638b"). InnerVolumeSpecName "kube-api-access-mftzt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275143 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts\") pod \"00679f33-a115-4278-b8dd-8515ab78adce\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275292 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tf76g\" (UniqueName: \"kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g\") pod \"00679f33-a115-4278-b8dd-8515ab78adce\" (UID: \"00679f33-a115-4278-b8dd-8515ab78adce\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275365 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts\") pod \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275382 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbkd5\" (UniqueName: \"kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5\") pod \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\" (UID: \"a99f1c9e-ad94-49bc-a382-f0615533a5ac\") " Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275736 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mftzt\" (UniqueName: \"kubernetes.io/projected/202367de-7bd5-4d73-8d87-536428b3638b-kube-api-access-mftzt\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.275749 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/202367de-7bd5-4d73-8d87-536428b3638b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.276551 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "00679f33-a115-4278-b8dd-8515ab78adce" (UID: "00679f33-a115-4278-b8dd-8515ab78adce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.276565 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a99f1c9e-ad94-49bc-a382-f0615533a5ac" (UID: "a99f1c9e-ad94-49bc-a382-f0615533a5ac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.278841 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g" (OuterVolumeSpecName: "kube-api-access-tf76g") pod "00679f33-a115-4278-b8dd-8515ab78adce" (UID: "00679f33-a115-4278-b8dd-8515ab78adce"). InnerVolumeSpecName "kube-api-access-tf76g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.279751 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5" (OuterVolumeSpecName: "kube-api-access-sbkd5") pod "a99f1c9e-ad94-49bc-a382-f0615533a5ac" (UID: "a99f1c9e-ad94-49bc-a382-f0615533a5ac"). InnerVolumeSpecName "kube-api-access-sbkd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.378132 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99f1c9e-ad94-49bc-a382-f0615533a5ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.378564 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbkd5\" (UniqueName: \"kubernetes.io/projected/a99f1c9e-ad94-49bc-a382-f0615533a5ac-kube-api-access-sbkd5\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.378761 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00679f33-a115-4278-b8dd-8515ab78adce-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.378921 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tf76g\" (UniqueName: \"kubernetes.io/projected/00679f33-a115-4278-b8dd-8515ab78adce-kube-api-access-tf76g\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.575410 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" event={"ID":"00679f33-a115-4278-b8dd-8515ab78adce","Type":"ContainerDied","Data":"74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b"} Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.575437 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a66f-account-create-update-b8qxp" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.575463 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74a59556d64264f28ceb06bed5b00088d153efec7480b06e1bfe726bf4267e5b" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.577891 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.578140 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-277a-account-create-update-j5vxd" event={"ID":"a99f1c9e-ad94-49bc-a382-f0615533a5ac","Type":"ContainerDied","Data":"4b62c1f3abc8860fc2a0398b19ee82b719e2b035877d5c70f13d146d46f94267"} Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.578460 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b62c1f3abc8860fc2a0398b19ee82b719e2b035877d5c70f13d146d46f94267" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.580981 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rznwv" event={"ID":"202367de-7bd5-4d73-8d87-536428b3638b","Type":"ContainerDied","Data":"9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa"} Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.581047 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d0e2247e3b1c2b67ce55a2ff34ebd77bbe6ea74ae6eb577b6c16893c0887daa" Nov 28 17:44:48 crc kubenswrapper[4909]: I1128 17:44:48.581059 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rznwv" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469175 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4gwd6"] Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469558 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00679f33-a115-4278-b8dd-8515ab78adce" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469574 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00679f33-a115-4278-b8dd-8515ab78adce" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469588 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1611532c-c609-460f-9376-7767f8caebec" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469595 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1611532c-c609-460f-9376-7767f8caebec" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469605 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f469cb4-de41-446a-b59c-6c7db8411f55" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469612 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f469cb4-de41-446a-b59c-6c7db8411f55" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469620 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22081f58-7375-4695-ae44-0d3523be341a" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469625 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="22081f58-7375-4695-ae44-0d3523be341a" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469636 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="202367de-7bd5-4d73-8d87-536428b3638b" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469677 4909 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="202367de-7bd5-4d73-8d87-536428b3638b" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: E1128 17:44:49.469692 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a99f1c9e-ad94-49bc-a382-f0615533a5ac" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469698 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a99f1c9e-ad94-49bc-a382-f0615533a5ac" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469854 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1611532c-c609-460f-9376-7767f8caebec" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469870 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a99f1c9e-ad94-49bc-a382-f0615533a5ac" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469883 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00679f33-a115-4278-b8dd-8515ab78adce" containerName="mariadb-account-create-update" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469896 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="22081f58-7375-4695-ae44-0d3523be341a" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469903 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="202367de-7bd5-4d73-8d87-536428b3638b" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.469914 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f469cb4-de41-446a-b59c-6c7db8411f55" containerName="mariadb-database-create" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.470517 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.472616 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-9gh62" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.473592 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.476399 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.478688 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4gwd6"] Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.601246 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.601324 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.601453 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8bqc\" (UniqueName: \"kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.601509 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.702914 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.703002 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.703042 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8bqc\" (UniqueName: \"kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc\") pod \"nova-cell0-conductor-db-sync-4gwd6\" 
(UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.703069 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.706973 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.707806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.710304 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.719383 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8bqc\" (UniqueName: \"kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc\") pod \"nova-cell0-conductor-db-sync-4gwd6\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:49 crc kubenswrapper[4909]: I1128 17:44:49.787477 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:50 crc kubenswrapper[4909]: I1128 17:44:50.269894 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4gwd6"] Nov 28 17:44:50 crc kubenswrapper[4909]: W1128 17:44:50.272797 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc682b238_7341_4d5f_bf11_45f1dfb386ce.slice/crio-453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c WatchSource:0}: Error finding container 453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c: Status 404 returned error can't find the container with id 453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c Nov 28 17:44:50 crc kubenswrapper[4909]: I1128 17:44:50.599338 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" event={"ID":"c682b238-7341-4d5f-bf11-45f1dfb386ce","Type":"ContainerStarted","Data":"f59a638d283155c2808c40b351bb1fb6ed0e0fcaa20b35283c12cff6d9201816"} Nov 28 17:44:50 crc kubenswrapper[4909]: I1128 17:44:50.599384 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" event={"ID":"c682b238-7341-4d5f-bf11-45f1dfb386ce","Type":"ContainerStarted","Data":"453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c"} Nov 28 17:44:50 crc kubenswrapper[4909]: I1128 17:44:50.621794 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" podStartSLOduration=1.62177566 podStartE2EDuration="1.62177566s" podCreationTimestamp="2025-11-28 17:44:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:44:50.614400112 +0000 UTC m=+5673.011084636" watchObservedRunningTime="2025-11-28 17:44:50.62177566 +0000 UTC m=+5673.018460184" Nov 28 17:44:55 crc kubenswrapper[4909]: I1128 17:44:55.659947 4909 generic.go:334] "Generic (PLEG): container finished" podID="c682b238-7341-4d5f-bf11-45f1dfb386ce" containerID="f59a638d283155c2808c40b351bb1fb6ed0e0fcaa20b35283c12cff6d9201816" exitCode=0 Nov 28 17:44:55 crc kubenswrapper[4909]: I1128 17:44:55.660058 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" event={"ID":"c682b238-7341-4d5f-bf11-45f1dfb386ce","Type":"ContainerDied","Data":"f59a638d283155c2808c40b351bb1fb6ed0e0fcaa20b35283c12cff6d9201816"} Nov 28 17:44:56 crc kubenswrapper[4909]: I1128 17:44:56.971467 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.140528 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data\") pod \"c682b238-7341-4d5f-bf11-45f1dfb386ce\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.140762 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle\") pod \"c682b238-7341-4d5f-bf11-45f1dfb386ce\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.140950 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts\") pod \"c682b238-7341-4d5f-bf11-45f1dfb386ce\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.141066 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8bqc\" (UniqueName: \"kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc\") pod \"c682b238-7341-4d5f-bf11-45f1dfb386ce\" (UID: \"c682b238-7341-4d5f-bf11-45f1dfb386ce\") " Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.145808 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts" (OuterVolumeSpecName: "scripts") pod "c682b238-7341-4d5f-bf11-45f1dfb386ce" (UID: "c682b238-7341-4d5f-bf11-45f1dfb386ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.146302 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc" (OuterVolumeSpecName: "kube-api-access-q8bqc") pod "c682b238-7341-4d5f-bf11-45f1dfb386ce" (UID: "c682b238-7341-4d5f-bf11-45f1dfb386ce"). InnerVolumeSpecName "kube-api-access-q8bqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.178763 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c682b238-7341-4d5f-bf11-45f1dfb386ce" (UID: "c682b238-7341-4d5f-bf11-45f1dfb386ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.179706 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data" (OuterVolumeSpecName: "config-data") pod "c682b238-7341-4d5f-bf11-45f1dfb386ce" (UID: "c682b238-7341-4d5f-bf11-45f1dfb386ce"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.243074 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.243131 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8bqc\" (UniqueName: \"kubernetes.io/projected/c682b238-7341-4d5f-bf11-45f1dfb386ce-kube-api-access-q8bqc\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.243141 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.243151 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c682b238-7341-4d5f-bf11-45f1dfb386ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.682177 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" event={"ID":"c682b238-7341-4d5f-bf11-45f1dfb386ce","Type":"ContainerDied","Data":"453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c"} Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.682233 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="453c6cfaaa053375712d1f0f98366b79e6aedcf8d7f107eae49ad556e7fb980c" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.682313 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4gwd6" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.777368 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:44:57 crc kubenswrapper[4909]: E1128 17:44:57.777832 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c682b238-7341-4d5f-bf11-45f1dfb386ce" containerName="nova-cell0-conductor-db-sync" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.777855 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c682b238-7341-4d5f-bf11-45f1dfb386ce" containerName="nova-cell0-conductor-db-sync" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.778102 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c682b238-7341-4d5f-bf11-45f1dfb386ce" containerName="nova-cell0-conductor-db-sync" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.778801 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.783548 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-9gh62" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.787738 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.793987 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.956172 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7szs\" (UniqueName: \"kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.956241 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:57 crc kubenswrapper[4909]: I1128 17:44:57.956688 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.058210 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.058281 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7szs\" (UniqueName: \"kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.058310 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.062281 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.063020 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.075279 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7szs\" (UniqueName: \"kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs\") pod \"nova-cell0-conductor-0\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.097155 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.594706 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:44:58 crc kubenswrapper[4909]: I1128 17:44:58.708918 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18","Type":"ContainerStarted","Data":"3430dc2bb769b29a47b5c7d8365e7a7598fceb37ac4d566449b3e59a8fda0fca"} Nov 28 17:44:59 crc kubenswrapper[4909]: I1128 17:44:59.717634 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18","Type":"ContainerStarted","Data":"c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798"} Nov 28 17:44:59 crc kubenswrapper[4909]: I1128 17:44:59.719115 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.135002 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.134977067 podStartE2EDuration="3.134977067s" podCreationTimestamp="2025-11-28 17:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:44:59.739783603 +0000 UTC m=+5682.136468137" watchObservedRunningTime="2025-11-28 17:45:00.134977067 +0000 UTC m=+5682.531661601" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.156629 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6"] Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.159197 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.163126 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.163460 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.187184 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6"] Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.305250 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.305651 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.305713 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mjkn\" (UniqueName: \"kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.407625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.407714 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mjkn\" (UniqueName: \"kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.407829 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.408471 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume\") pod 
\"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.412307 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.440247 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mjkn\" (UniqueName: \"kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn\") pod \"collect-profiles-29405865-v8wr6\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.515737 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:00 crc kubenswrapper[4909]: I1128 17:45:00.963402 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6"] Nov 28 17:45:00 crc kubenswrapper[4909]: W1128 17:45:00.967715 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40bf5f05_5059_4119_aba9_8b46f5ffd953.slice/crio-f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e WatchSource:0}: Error finding container f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e: Status 404 returned error can't find the container with id f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e Nov 28 17:45:01 crc kubenswrapper[4909]: I1128 17:45:01.743691 4909 generic.go:334] "Generic (PLEG): container finished" podID="40bf5f05-5059-4119-aba9-8b46f5ffd953" containerID="b6cafabfb3fd093c06d23f9ae6815454ffa8d52c69392dcc9a2bfafe87ebd975" exitCode=0 Nov 28 17:45:01 crc kubenswrapper[4909]: I1128 17:45:01.743814 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" event={"ID":"40bf5f05-5059-4119-aba9-8b46f5ffd953","Type":"ContainerDied","Data":"b6cafabfb3fd093c06d23f9ae6815454ffa8d52c69392dcc9a2bfafe87ebd975"} Nov 28 17:45:01 crc kubenswrapper[4909]: I1128 17:45:01.744134 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" event={"ID":"40bf5f05-5059-4119-aba9-8b46f5ffd953","Type":"ContainerStarted","Data":"f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e"} Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.044088 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.129491 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.152857 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume\") pod \"40bf5f05-5059-4119-aba9-8b46f5ffd953\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.153000 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume\") pod \"40bf5f05-5059-4119-aba9-8b46f5ffd953\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.153110 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mjkn\" (UniqueName: \"kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn\") pod \"40bf5f05-5059-4119-aba9-8b46f5ffd953\" (UID: \"40bf5f05-5059-4119-aba9-8b46f5ffd953\") " Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.153927 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume" (OuterVolumeSpecName: "config-volume") pod "40bf5f05-5059-4119-aba9-8b46f5ffd953" (UID: "40bf5f05-5059-4119-aba9-8b46f5ffd953"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.163232 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn" (OuterVolumeSpecName: "kube-api-access-9mjkn") pod "40bf5f05-5059-4119-aba9-8b46f5ffd953" (UID: "40bf5f05-5059-4119-aba9-8b46f5ffd953"). InnerVolumeSpecName "kube-api-access-9mjkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.172004 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "40bf5f05-5059-4119-aba9-8b46f5ffd953" (UID: "40bf5f05-5059-4119-aba9-8b46f5ffd953"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.255590 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40bf5f05-5059-4119-aba9-8b46f5ffd953-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.255627 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mjkn\" (UniqueName: \"kubernetes.io/projected/40bf5f05-5059-4119-aba9-8b46f5ffd953-kube-api-access-9mjkn\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.255640 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40bf5f05-5059-4119-aba9-8b46f5ffd953-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.656958 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-9d9sb"] Nov 28 17:45:03 crc kubenswrapper[4909]: E1128 17:45:03.657405 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40bf5f05-5059-4119-aba9-8b46f5ffd953" containerName="collect-profiles" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.657422 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="40bf5f05-5059-4119-aba9-8b46f5ffd953" containerName="collect-profiles" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.657697 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="40bf5f05-5059-4119-aba9-8b46f5ffd953" containerName="collect-profiles" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.658416 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.660063 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.660288 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.681110 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9d9sb"] Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.761283 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" event={"ID":"40bf5f05-5059-4119-aba9-8b46f5ffd953","Type":"ContainerDied","Data":"f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e"} Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.761329 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1987849706c320c8f109113159214ed5d4cc8fe33c0f20b9333cdf14fdc214e" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.761390 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.767258 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.767306 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.767448 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.767506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtb9h\" (UniqueName: \"kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.818784 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.820205 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.821917 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.837291 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.869152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.869207 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtb9h\" (UniqueName: \"kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.869247 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.869269 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.873591 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.873932 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.875545 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.878972 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.880519 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.890212 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.903974 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtb9h\" (UniqueName: \"kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h\") pod \"nova-cell0-cell-mapping-9d9sb\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.970759 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971157 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq8pr\" (UniqueName: \"kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971223 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7b4f\" (UniqueName: \"kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971256 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971280 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971334 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971367 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971410 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.971436 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:03 crc kubenswrapper[4909]: I1128 17:45:03.995634 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.057743 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.058969 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.063475 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.072542 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.072865 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.072997 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq8pr\" (UniqueName: \"kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.073121 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7b4f\" (UniqueName: \"kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.073203 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.073285 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.073406 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.073497 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.078423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.082990 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.086598 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.086850 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.087675 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.089231 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.091340 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.092394 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.095231 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.111158 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq8pr\" (UniqueName: \"kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr\") pod \"nova-api-0\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.113905 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.117002 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7b4f\" (UniqueName: \"kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f\") pod \"nova-metadata-0\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.134286 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.137029 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.138188 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.146117 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.153390 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176412 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176470 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176493 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176523 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt4wq\" (UniqueName: \"kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176542 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176569 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176611 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgdk6\" (UniqueName: \"kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.176638 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " 
pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.206698 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.221782 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-6xxj8"] Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.253521 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278252 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwjb5\" (UniqueName: \"kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278298 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278323 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278368 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt4wq\" (UniqueName: \"kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278392 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278415 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278438 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278468 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278509 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgdk6\" (UniqueName: \"kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.278537 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.279539 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.279806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.280456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.282361 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.284508 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.284849 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc 
kubenswrapper[4909]: I1128 17:45:04.297590 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt4wq\" (UniqueName: \"kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq\") pod \"dnsmasq-dns-6b8d567c77-w7dlm\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.305365 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgdk6\" (UniqueName: \"kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6\") pod \"nova-scheduler-0\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.379916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwjb5\" (UniqueName: \"kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.380024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.380050 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.384734 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.387255 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:04 crc kubenswrapper[4909]: I1128 17:45:04.399084 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwjb5\" (UniqueName: \"kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5\") pod \"nova-cell1-novncproxy-0\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.482561 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.508103 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.508137 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.557703 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9d9sb"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.573625 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:04.586632 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a78f44_565c_4a51_821b_0c83055e4fd3.slice/crio-d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb WatchSource:0}: Error finding container d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb: Status 404 returned error can't find the container with id d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:04.590021 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafcd0737_ae40_453f_a854_927a2e182ddf.slice/crio-f31ac5c5e287193c7d3cc1d32a965d475a095088bde7af5d8f5cec27442707a0 WatchSource:0}: Error finding container f31ac5c5e287193c7d3cc1d32a965d475a095088bde7af5d8f5cec27442707a0: Status 404 returned error can't find the container with id f31ac5c5e287193c7d3cc1d32a965d475a095088bde7af5d8f5cec27442707a0 Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.691147 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:04.717090 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a0a56a1_1441_45cf_91c7_f69a8175b39a.slice/crio-96efdb48ac2822defb1493aa90697b4054ce768c21548d8ac2e7fc207fd770f4 WatchSource:0}: Error finding container 96efdb48ac2822defb1493aa90697b4054ce768c21548d8ac2e7fc207fd770f4: Status 404 returned error can't find the container with id 96efdb48ac2822defb1493aa90697b4054ce768c21548d8ac2e7fc207fd770f4 Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.717132 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p9m6s"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.718387 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.723734 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.723753 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.756015 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p9m6s"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.779117 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9d9sb" event={"ID":"f5a78f44-565c-4a51-821b-0c83055e4fd3","Type":"ContainerStarted","Data":"d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.780429 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerStarted","Data":"96efdb48ac2822defb1493aa90697b4054ce768c21548d8ac2e7fc207fd770f4"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.782832 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerStarted","Data":"f31ac5c5e287193c7d3cc1d32a965d475a095088bde7af5d8f5cec27442707a0"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.927688 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jsrk\" (UniqueName: \"kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.927778 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.927821 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:04.927849 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.028832 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jsrk\" (UniqueName: \"kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " 
pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.028932 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.029008 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.029044 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.034276 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.040436 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.040892 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.058337 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jsrk\" (UniqueName: \"kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk\") pod \"nova-cell1-conductor-db-sync-p9m6s\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.339197 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.636939 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.646613 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"] Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:05.647708 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb832fa8f_af08_4832_9f7e_a9d921aab4be.slice/crio-1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a WatchSource:0}: Error finding container 1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a: Status 404 returned error can't find the container with id 1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:05.651166 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00e109aa_1028_4d1f_8e3d_525f191e8e35.slice/crio-53bca10a31d4934f5e4ea6ecee767f65b4e814c0c06a82f8aa8e70678a62a9a8 WatchSource:0}: Error finding container 53bca10a31d4934f5e4ea6ecee767f65b4e814c0c06a82f8aa8e70678a62a9a8: Status 404 returned error can't find the container with id 53bca10a31d4934f5e4ea6ecee767f65b4e814c0c06a82f8aa8e70678a62a9a8 Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.658740 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.802894 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" event={"ID":"b832fa8f-af08-4832-9f7e-a9d921aab4be","Type":"ContainerStarted","Data":"1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.807984 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eec53938-b992-40c0-8b4d-3109fa149936","Type":"ContainerStarted","Data":"7b74307b23a827ccdf59bab61133b6340c7a9854c1b8ae1cb44e70f21f980489"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.809220 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"00e109aa-1028-4d1f-8e3d-525f191e8e35","Type":"ContainerStarted","Data":"53bca10a31d4934f5e4ea6ecee767f65b4e814c0c06a82f8aa8e70678a62a9a8"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.810539 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9d9sb" event={"ID":"f5a78f44-565c-4a51-821b-0c83055e4fd3","Type":"ContainerStarted","Data":"0c124de33435ce2fe12221ae8431f18613e29973f9159df5f61fbeb56b3e2ff8"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.813749 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerStarted","Data":"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.813778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerStarted","Data":"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.817010 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerStarted","Data":"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.817046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerStarted","Data":"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"} Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.828399 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-9d9sb" podStartSLOduration=2.828384482 podStartE2EDuration="2.828384482s" podCreationTimestamp="2025-11-28 17:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:05.825223887 +0000 UTC m=+5688.221908411" watchObservedRunningTime="2025-11-28 17:45:05.828384482 +0000 UTC m=+5688.225069006" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.847916 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.847901147 podStartE2EDuration="2.847901147s" podCreationTimestamp="2025-11-28 17:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:05.843423016 +0000 UTC m=+5688.240107550" watchObservedRunningTime="2025-11-28 17:45:05.847901147 +0000 UTC m=+5688.244585661" Nov 28 17:45:05 crc kubenswrapper[4909]: W1128 17:45:05.868410 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2e858b1_afe6_4fde_a513_635c137a0275.slice/crio-21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5 WatchSource:0}: Error finding container 21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5: Status 404 returned error can't find the container with id 21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5 Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.877771 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p9m6s"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.883179 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.883154395 podStartE2EDuration="2.883154395s" podCreationTimestamp="2025-11-28 17:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:05.860380672 +0000 UTC m=+5688.257065196" watchObservedRunningTime="2025-11-28 17:45:05.883154395 +0000 UTC m=+5688.279838919" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.917828 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e890f14-3666-43fd-9809-568c1ea5d014" path="/var/lib/kubelet/pods/2e890f14-3666-43fd-9809-568c1ea5d014/volumes" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.971767 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.973975 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:05 crc kubenswrapper[4909]: I1128 17:45:05.979769 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.057045 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.057139 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.057209 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cr2s\" (UniqueName: \"kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.158667 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cr2s\" (UniqueName: \"kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.158740 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.158791 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.159266 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.159720 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.180426 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4cr2s\" (UniqueName: \"kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s\") pod \"redhat-marketplace-7fc2v\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.257282 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.750400 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:06 crc kubenswrapper[4909]: W1128 17:45:06.753160 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b8f57fb_be47_431e_b192_3f6407348089.slice/crio-633f01267d0a9f6e2220f3a80b97347de861707956ff7c9227371fec7c3e86cd WatchSource:0}: Error finding container 633f01267d0a9f6e2220f3a80b97347de861707956ff7c9227371fec7c3e86cd: Status 404 returned error can't find the container with id 633f01267d0a9f6e2220f3a80b97347de861707956ff7c9227371fec7c3e86cd Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.853483 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"00e109aa-1028-4d1f-8e3d-525f191e8e35","Type":"ContainerStarted","Data":"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.857024 4909 generic.go:334] "Generic (PLEG): container finished" podID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerID="f191de5981361672a5e668ad37f6f1ab7da4d3987321bc62a1cdfb4529df68ce" exitCode=0 Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.857091 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" event={"ID":"b832fa8f-af08-4832-9f7e-a9d921aab4be","Type":"ContainerDied","Data":"f191de5981361672a5e668ad37f6f1ab7da4d3987321bc62a1cdfb4529df68ce"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.859901 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eec53938-b992-40c0-8b4d-3109fa149936","Type":"ContainerStarted","Data":"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.866170 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerStarted","Data":"633f01267d0a9f6e2220f3a80b97347de861707956ff7c9227371fec7c3e86cd"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.868927 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" event={"ID":"c2e858b1-afe6-4fde-a513-635c137a0275","Type":"ContainerStarted","Data":"7719531f915a8228b59dbb4f35d129d72e5414119d5f730c78cee695f12e2725"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.868975 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" event={"ID":"c2e858b1-afe6-4fde-a513-635c137a0275","Type":"ContainerStarted","Data":"21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5"} Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.877422 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.877400035 
podStartE2EDuration="3.877400035s" podCreationTimestamp="2025-11-28 17:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:06.870296604 +0000 UTC m=+5689.266981138" watchObservedRunningTime="2025-11-28 17:45:06.877400035 +0000 UTC m=+5689.274084559" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.926637 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.926614208 podStartE2EDuration="2.926614208s" podCreationTimestamp="2025-11-28 17:45:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:06.920107583 +0000 UTC m=+5689.316792127" watchObservedRunningTime="2025-11-28 17:45:06.926614208 +0000 UTC m=+5689.323298732" Nov 28 17:45:06 crc kubenswrapper[4909]: I1128 17:45:06.943628 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" podStartSLOduration=2.943610825 podStartE2EDuration="2.943610825s" podCreationTimestamp="2025-11-28 17:45:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:06.940294996 +0000 UTC m=+5689.336979520" watchObservedRunningTime="2025-11-28 17:45:06.943610825 +0000 UTC m=+5689.340295349" Nov 28 17:45:07 crc kubenswrapper[4909]: I1128 17:45:07.879277 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" event={"ID":"b832fa8f-af08-4832-9f7e-a9d921aab4be","Type":"ContainerStarted","Data":"dde5f87023b06fc0c065d4ecdc2440ef61ee2578afc1e009d913580c4fba117e"} Nov 28 17:45:07 crc kubenswrapper[4909]: I1128 17:45:07.879548 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:07 crc kubenswrapper[4909]: I1128 17:45:07.882937 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b8f57fb-be47-431e-b192-3f6407348089" containerID="58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924" exitCode=0 Nov 28 17:45:07 crc kubenswrapper[4909]: I1128 17:45:07.883090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerDied","Data":"58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924"} Nov 28 17:45:07 crc kubenswrapper[4909]: I1128 17:45:07.908318 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" podStartSLOduration=4.90830017 podStartE2EDuration="4.90830017s" podCreationTimestamp="2025-11-28 17:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:07.90678648 +0000 UTC m=+5690.303471014" watchObservedRunningTime="2025-11-28 17:45:07.90830017 +0000 UTC m=+5690.304984694" Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.254347 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.254643 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.482847 4909 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.508738 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.912073 4909 generic.go:334] "Generic (PLEG): container finished" podID="c2e858b1-afe6-4fde-a513-635c137a0275" containerID="7719531f915a8228b59dbb4f35d129d72e5414119d5f730c78cee695f12e2725" exitCode=0 Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.917333 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b8f57fb-be47-431e-b192-3f6407348089" containerID="8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2" exitCode=0 Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.922483 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" event={"ID":"c2e858b1-afe6-4fde-a513-635c137a0275","Type":"ContainerDied","Data":"7719531f915a8228b59dbb4f35d129d72e5414119d5f730c78cee695f12e2725"} Nov 28 17:45:09 crc kubenswrapper[4909]: I1128 17:45:09.922551 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerDied","Data":"8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2"} Nov 28 17:45:10 crc kubenswrapper[4909]: I1128 17:45:10.936032 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerStarted","Data":"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c"} Nov 28 17:45:10 crc kubenswrapper[4909]: I1128 17:45:10.940335 4909 generic.go:334] "Generic (PLEG): container finished" podID="f5a78f44-565c-4a51-821b-0c83055e4fd3" containerID="0c124de33435ce2fe12221ae8431f18613e29973f9159df5f61fbeb56b3e2ff8" exitCode=0 Nov 28 17:45:10 crc kubenswrapper[4909]: I1128 17:45:10.940434 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9d9sb" event={"ID":"f5a78f44-565c-4a51-821b-0c83055e4fd3","Type":"ContainerDied","Data":"0c124de33435ce2fe12221ae8431f18613e29973f9159df5f61fbeb56b3e2ff8"} Nov 28 17:45:10 crc kubenswrapper[4909]: I1128 17:45:10.962092 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7fc2v" podStartSLOduration=3.444127727 podStartE2EDuration="5.96207554s" podCreationTimestamp="2025-11-28 17:45:05 +0000 UTC" firstStartedPulling="2025-11-28 17:45:07.885884788 +0000 UTC m=+5690.282569352" lastFinishedPulling="2025-11-28 17:45:10.403832631 +0000 UTC m=+5692.800517165" observedRunningTime="2025-11-28 17:45:10.959273744 +0000 UTC m=+5693.355958278" watchObservedRunningTime="2025-11-28 17:45:10.96207554 +0000 UTC m=+5693.358760064" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.301450 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.379392 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jsrk\" (UniqueName: \"kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk\") pod \"c2e858b1-afe6-4fde-a513-635c137a0275\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.379849 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle\") pod \"c2e858b1-afe6-4fde-a513-635c137a0275\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.379895 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data\") pod \"c2e858b1-afe6-4fde-a513-635c137a0275\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.379924 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts\") pod \"c2e858b1-afe6-4fde-a513-635c137a0275\" (UID: \"c2e858b1-afe6-4fde-a513-635c137a0275\") " Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.385577 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts" (OuterVolumeSpecName: "scripts") pod "c2e858b1-afe6-4fde-a513-635c137a0275" (UID: "c2e858b1-afe6-4fde-a513-635c137a0275"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.385637 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk" (OuterVolumeSpecName: "kube-api-access-2jsrk") pod "c2e858b1-afe6-4fde-a513-635c137a0275" (UID: "c2e858b1-afe6-4fde-a513-635c137a0275"). InnerVolumeSpecName "kube-api-access-2jsrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.407871 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data" (OuterVolumeSpecName: "config-data") pod "c2e858b1-afe6-4fde-a513-635c137a0275" (UID: "c2e858b1-afe6-4fde-a513-635c137a0275"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.408030 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2e858b1-afe6-4fde-a513-635c137a0275" (UID: "c2e858b1-afe6-4fde-a513-635c137a0275"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.482291 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.482327 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.482339 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e858b1-afe6-4fde-a513-635c137a0275-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.482347 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jsrk\" (UniqueName: \"kubernetes.io/projected/c2e858b1-afe6-4fde-a513-635c137a0275-kube-api-access-2jsrk\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.953134 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" event={"ID":"c2e858b1-afe6-4fde-a513-635c137a0275","Type":"ContainerDied","Data":"21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5"} Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.953285 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21a278fa82a8ea2839438754da3158bb07fa2dd12c808b3f940a755977a307c5" Nov 28 17:45:11 crc kubenswrapper[4909]: I1128 17:45:11.953238 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p9m6s" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.049226 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:45:12 crc kubenswrapper[4909]: E1128 17:45:12.049628 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e858b1-afe6-4fde-a513-635c137a0275" containerName="nova-cell1-conductor-db-sync" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.049649 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e858b1-afe6-4fde-a513-635c137a0275" containerName="nova-cell1-conductor-db-sync" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.049838 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e858b1-afe6-4fde-a513-635c137a0275" containerName="nova-cell1-conductor-db-sync" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.050448 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.052143 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.087321 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.194572 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.194606 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.194689 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km74v\" (UniqueName: \"kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.296584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.296626 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.296705 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km74v\" (UniqueName: \"kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.301804 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.305339 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.311499 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km74v\" (UniqueName: \"kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v\") pod \"nova-cell1-conductor-0\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.371786 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.400077 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.501573 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtb9h\" (UniqueName: \"kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h\") pod \"f5a78f44-565c-4a51-821b-0c83055e4fd3\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.501625 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data\") pod \"f5a78f44-565c-4a51-821b-0c83055e4fd3\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.501673 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts\") pod \"f5a78f44-565c-4a51-821b-0c83055e4fd3\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.501795 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle\") pod \"f5a78f44-565c-4a51-821b-0c83055e4fd3\" (UID: \"f5a78f44-565c-4a51-821b-0c83055e4fd3\") " Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.505212 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts" (OuterVolumeSpecName: "scripts") pod "f5a78f44-565c-4a51-821b-0c83055e4fd3" (UID: "f5a78f44-565c-4a51-821b-0c83055e4fd3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.505278 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h" (OuterVolumeSpecName: "kube-api-access-jtb9h") pod "f5a78f44-565c-4a51-821b-0c83055e4fd3" (UID: "f5a78f44-565c-4a51-821b-0c83055e4fd3"). InnerVolumeSpecName "kube-api-access-jtb9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.530733 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data" (OuterVolumeSpecName: "config-data") pod "f5a78f44-565c-4a51-821b-0c83055e4fd3" (UID: "f5a78f44-565c-4a51-821b-0c83055e4fd3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.530772 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5a78f44-565c-4a51-821b-0c83055e4fd3" (UID: "f5a78f44-565c-4a51-821b-0c83055e4fd3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.605898 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.605939 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.605951 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtb9h\" (UniqueName: \"kubernetes.io/projected/f5a78f44-565c-4a51-821b-0c83055e4fd3-kube-api-access-jtb9h\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.605964 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a78f44-565c-4a51-821b-0c83055e4fd3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.712963 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.963151 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9d9sb" event={"ID":"f5a78f44-565c-4a51-821b-0c83055e4fd3","Type":"ContainerDied","Data":"d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb"} Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.963193 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d99566d00dfb7afdb115545cc31c2ed5c7e647e164a9e0a6cae581ee66cb15eb" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.963244 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9d9sb" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.969929 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c","Type":"ContainerStarted","Data":"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104"} Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.969978 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c","Type":"ContainerStarted","Data":"8fa86837a11b326907fb332381dea7418ba32038e08b04c9a50144ffb3507b72"} Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.970223 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:12 crc kubenswrapper[4909]: I1128 17:45:12.997206 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=0.997187493 podStartE2EDuration="997.187493ms" podCreationTimestamp="2025-11-28 17:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:12.985543669 +0000 UTC m=+5695.382228193" watchObservedRunningTime="2025-11-28 17:45:12.997187493 +0000 UTC m=+5695.393872017" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.153676 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.153940 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-log" containerID="cri-o://0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8" gracePeriod=30 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.154173 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-api" containerID="cri-o://c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525" gracePeriod=30 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.165792 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.165991 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="00e109aa-1028-4d1f-8e3d-525f191e8e35" containerName="nova-scheduler-scheduler" containerID="cri-o://345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976" gracePeriod=30 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.195904 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.196356 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-metadata" containerID="cri-o://d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3" gracePeriod=30 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.196544 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-log" 
containerID="cri-o://d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf" gracePeriod=30 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.856765 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.900145 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.930092 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7b4f\" (UniqueName: \"kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f\") pod \"afcd0737-ae40-453f-a854-927a2e182ddf\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.930143 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data\") pod \"afcd0737-ae40-453f-a854-927a2e182ddf\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.930165 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle\") pod \"afcd0737-ae40-453f-a854-927a2e182ddf\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.931089 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs\") pod \"afcd0737-ae40-453f-a854-927a2e182ddf\" (UID: \"afcd0737-ae40-453f-a854-927a2e182ddf\") " Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.931397 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs" (OuterVolumeSpecName: "logs") pod "afcd0737-ae40-453f-a854-927a2e182ddf" (UID: "afcd0737-ae40-453f-a854-927a2e182ddf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.931810 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afcd0737-ae40-453f-a854-927a2e182ddf-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.936265 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f" (OuterVolumeSpecName: "kube-api-access-w7b4f") pod "afcd0737-ae40-453f-a854-927a2e182ddf" (UID: "afcd0737-ae40-453f-a854-927a2e182ddf"). InnerVolumeSpecName "kube-api-access-w7b4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.961298 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afcd0737-ae40-453f-a854-927a2e182ddf" (UID: "afcd0737-ae40-453f-a854-927a2e182ddf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.963303 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data" (OuterVolumeSpecName: "config-data") pod "afcd0737-ae40-453f-a854-927a2e182ddf" (UID: "afcd0737-ae40-453f-a854-927a2e182ddf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.979827 4909 generic.go:334] "Generic (PLEG): container finished" podID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerID="c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525" exitCode=0 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.980040 4909 generic.go:334] "Generic (PLEG): container finished" podID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerID="0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8" exitCode=143 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.979892 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.979911 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerDied","Data":"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"} Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.980289 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerDied","Data":"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"} Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.980305 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a0a56a1-1441-45cf-91c7-f69a8175b39a","Type":"ContainerDied","Data":"96efdb48ac2822defb1493aa90697b4054ce768c21548d8ac2e7fc207fd770f4"} Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.980345 4909 scope.go:117] "RemoveContainer" containerID="c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.982869 4909 generic.go:334] "Generic (PLEG): container finished" podID="afcd0737-ae40-453f-a854-927a2e182ddf" containerID="d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3" exitCode=0 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.982888 4909 generic.go:334] "Generic (PLEG): container finished" podID="afcd0737-ae40-453f-a854-927a2e182ddf" containerID="d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf" exitCode=143 Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.983449 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.983533 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerDied","Data":"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"} Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.983555 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerDied","Data":"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"} Nov 28 17:45:13 crc kubenswrapper[4909]: I1128 17:45:13.983566 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"afcd0737-ae40-453f-a854-927a2e182ddf","Type":"ContainerDied","Data":"f31ac5c5e287193c7d3cc1d32a965d475a095088bde7af5d8f5cec27442707a0"} Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.018981 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.027907 4909 scope.go:117] "RemoveContainer" containerID="0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.030054 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.032935 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rq8pr\" (UniqueName: \"kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr\") pod \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033051 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data\") pod \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033094 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs\") pod \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033146 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle\") pod \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\" (UID: \"1a0a56a1-1441-45cf-91c7-f69a8175b39a\") " Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033591 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7b4f\" (UniqueName: \"kubernetes.io/projected/afcd0737-ae40-453f-a854-927a2e182ddf-kube-api-access-w7b4f\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033604 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.033612 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/afcd0737-ae40-453f-a854-927a2e182ddf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.034068 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs" (OuterVolumeSpecName: "logs") pod "1a0a56a1-1441-45cf-91c7-f69a8175b39a" (UID: "1a0a56a1-1441-45cf-91c7-f69a8175b39a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.037178 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr" (OuterVolumeSpecName: "kube-api-access-rq8pr") pod "1a0a56a1-1441-45cf-91c7-f69a8175b39a" (UID: "1a0a56a1-1441-45cf-91c7-f69a8175b39a"). InnerVolumeSpecName "kube-api-access-rq8pr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053089 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.053489 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-log" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053504 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-log" Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.053519 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-log" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053525 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-log" Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.053532 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a78f44-565c-4a51-821b-0c83055e4fd3" containerName="nova-manage" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053538 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a78f44-565c-4a51-821b-0c83055e4fd3" containerName="nova-manage" Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.053548 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-api" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053554 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-api" Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.053564 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-metadata" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053569 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-metadata" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053757 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-metadata" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053769 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a78f44-565c-4a51-821b-0c83055e4fd3" containerName="nova-manage" Nov 28 17:45:14 crc 
kubenswrapper[4909]: I1128 17:45:14.053788 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-log"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053801 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" containerName="nova-metadata-log"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.053813 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" containerName="nova-api-api"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.055807 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.057993 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.080452 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a0a56a1-1441-45cf-91c7-f69a8175b39a" (UID: "1a0a56a1-1441-45cf-91c7-f69a8175b39a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.080831 4909 scope.go:117] "RemoveContainer" containerID="c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"
Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.082213 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525\": container with ID starting with c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525 not found: ID does not exist" containerID="c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.082240 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"} err="failed to get container status \"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525\": rpc error: code = NotFound desc = could not find container \"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525\": container with ID starting with c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.082262 4909 scope.go:117] "RemoveContainer" containerID="0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.082326 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.082610 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8\": container with ID starting with 0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8 not found: ID does not exist" containerID="0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.082637 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"} err="failed to get container status \"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8\": rpc error: code = NotFound desc = could not find container \"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8\": container with ID starting with 0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.082649 4909 scope.go:117] "RemoveContainer" containerID="c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.084146 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525"} err="failed to get container status \"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525\": rpc error: code = NotFound desc = could not find container \"c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525\": container with ID starting with c27fe6a05fa150c12aec0bd3a8373d0d1e55ed4b04ae26109f66db7aadab7525 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.084194 4909 scope.go:117] "RemoveContainer" containerID="0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.084452 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8"} err="failed to get container status \"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8\": rpc error: code = NotFound desc = could not find container \"0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8\": container with ID starting with 0b9a907c51344445790bde1a472324cef3ce0fbf5afe30bc294969104e8c37e8 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.084843 4909 scope.go:117] "RemoveContainer" containerID="d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.103866 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data" (OuterVolumeSpecName: "config-data") pod "1a0a56a1-1441-45cf-91c7-f69a8175b39a" (UID: "1a0a56a1-1441-45cf-91c7-f69a8175b39a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.104309 4909 scope.go:117] "RemoveContainer" containerID="d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.122084 4909 scope.go:117] "RemoveContainer" containerID="d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"
Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.122536 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3\": container with ID starting with d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3 not found: ID does not exist" containerID="d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.122608 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"} err="failed to get container status \"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3\": rpc error: code = NotFound desc = could not find container \"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3\": container with ID starting with d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.122649 4909 scope.go:117] "RemoveContainer" containerID="d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"
Nov 28 17:45:14 crc kubenswrapper[4909]: E1128 17:45:14.123121 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf\": container with ID starting with d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf not found: ID does not exist" containerID="d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.123167 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"} err="failed to get container status \"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf\": rpc error: code = NotFound desc = could not find container \"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf\": container with ID starting with d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.123195 4909 scope.go:117] "RemoveContainer" containerID="d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.123523 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3"} err="failed to get container status \"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3\": rpc error: code = NotFound desc = could not find container \"d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3\": container with ID starting with d53d577c211630d78c542853ea05b9145cc2c886a03c20aad6a7db97b3b551d3 not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.123556 4909 scope.go:117] "RemoveContainer" containerID="d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.123920 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf"} err="failed to get container status \"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf\": rpc error: code = NotFound desc = could not find container \"d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf\": container with ID starting with d47c436c94ecf5a38fa9ec3a2373f1b410b8fc12a9ceaba744bac15660950edf not found: ID does not exist"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.134739 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.134811 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vz5d\" (UniqueName: \"kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.134835 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.135069 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0"
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.135380 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.135403 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a0a56a1-1441-45cf-91c7-f69a8175b39a-logs\") on node \"crc\" DevicePath \"\""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.135412 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0a56a1-1441-45cf-91c7-f69a8175b39a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.135425 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rq8pr\" (UniqueName: \"kubernetes.io/projected/1a0a56a1-1441-45cf-91c7-f69a8175b39a-kube-api-access-rq8pr\") on node \"crc\" DevicePath \"\""
Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.238582 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0"
\"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.238713 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.238750 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vz5d\" (UniqueName: \"kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.238769 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.240700 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.242876 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.243935 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.259966 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vz5d\" (UniqueName: \"kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d\") pod \"nova-metadata-0\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.345320 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.355772 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.366837 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.368265 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.371829 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.383597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.387306 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.442086 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.442142 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.442166 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vwbs\" (UniqueName: \"kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.442247 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.508749 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.509854 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.526498 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.543331 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.543413 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.543449 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.543470 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vwbs\" (UniqueName: \"kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.544078 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.549389 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.564500 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.565805 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vwbs\" (UniqueName: \"kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs\") pod \"nova-api-0\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.604348 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.604572 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="dnsmasq-dns" containerID="cri-o://a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9" gracePeriod=10 Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.687110 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:14 crc kubenswrapper[4909]: W1128 17:45:14.937544 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8487cde4_87ee_4dcb_b4b1_ac47ca899154.slice/crio-7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710 WatchSource:0}: Error finding container 7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710: Status 404 returned error can't find the container with id 7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710 Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.942118 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:14 crc kubenswrapper[4909]: I1128 17:45:14.996443 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerStarted","Data":"7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710"} Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.010097 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.137864 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.663539 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.762814 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjz95\" (UniqueName: \"kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.762947 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.762992 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.763079 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.763103 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.766887 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95" 
(OuterVolumeSpecName: "kube-api-access-rjz95") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f"). InnerVolumeSpecName "kube-api-access-rjz95". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.812011 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config" (OuterVolumeSpecName: "config") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.812366 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:15 crc kubenswrapper[4909]: E1128 17:45:15.817265 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb podName:5bc8ecfe-e00f-420e-870a-a0f4a9b8678f nodeName:}" failed. No retries permitted until 2025-11-28 17:45:16.317245508 +0000 UTC m=+5698.713930032 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ovsdbserver-nb" (UniqueName: "kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f") : error deleting /var/lib/kubelet/pods/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f/volume-subpaths: remove /var/lib/kubelet/pods/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f/volume-subpaths: no such file or directory Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.817479 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.865017 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.865049 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.865062 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjz95\" (UniqueName: \"kubernetes.io/projected/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-kube-api-access-rjz95\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.865077 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.912610 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a0a56a1-1441-45cf-91c7-f69a8175b39a" path="/var/lib/kubelet/pods/1a0a56a1-1441-45cf-91c7-f69a8175b39a/volumes" Nov 28 17:45:15 crc kubenswrapper[4909]: I1128 17:45:15.913263 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afcd0737-ae40-453f-a854-927a2e182ddf" path="/var/lib/kubelet/pods/afcd0737-ae40-453f-a854-927a2e182ddf/volumes" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.005449 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerStarted","Data":"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.005489 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerStarted","Data":"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.005499 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerStarted","Data":"4dda0e9cf1bb76c1700ce7aaba5eb0749997a2502f0c0f4fe99161ae8488512d"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.010191 4909 generic.go:334] "Generic (PLEG): container finished" podID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerID="a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9" exitCode=0 Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.010238 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.010266 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" event={"ID":"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f","Type":"ContainerDied","Data":"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.010631 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dfd646bd7-l24xd" event={"ID":"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f","Type":"ContainerDied","Data":"b964510354809875e414c6f26a04abe5aced8c475d59a41c75cb40126f46093b"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.010669 4909 scope.go:117] "RemoveContainer" containerID="a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.015287 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerStarted","Data":"5d546ecbf4e9f4c6725f9278cd75bda9b2bf6f3321d224afd07055c3c9076f2f"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.015315 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerStarted","Data":"1e8f253edf2feb8a9ef876c0d68b5f2e2e9898587f99da9553788231dc4d87a9"} Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.028359 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.028339194 podStartE2EDuration="2.028339194s" podCreationTimestamp="2025-11-28 17:45:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:16.022221409 +0000 UTC m=+5698.418905943" watchObservedRunningTime="2025-11-28 17:45:16.028339194 +0000 UTC m=+5698.425023738" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.035396 4909 scope.go:117] "RemoveContainer" containerID="64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.045556 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.045538556 podStartE2EDuration="2.045538556s" podCreationTimestamp="2025-11-28 17:45:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:16.043529472 +0000 UTC m=+5698.440214016" watchObservedRunningTime="2025-11-28 17:45:16.045538556 +0000 UTC m=+5698.442223080" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.057467 4909 scope.go:117] "RemoveContainer" containerID="a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9" Nov 28 17:45:16 crc kubenswrapper[4909]: E1128 17:45:16.057832 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9\": container with ID starting with a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9 not found: ID does not exist" containerID="a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.057863 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9"} err="failed to get container status \"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9\": rpc error: code = NotFound desc = could not find container \"a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9\": container with ID starting with a353b2857d243b53f29d835c37f40759c17d3f802104d478d07445e09ae08ce9 not found: ID does not exist" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.057883 4909 scope.go:117] "RemoveContainer" containerID="64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06" Nov 28 17:45:16 crc kubenswrapper[4909]: E1128 17:45:16.058106 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06\": container with ID starting with 64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06 not found: ID does not exist" containerID="64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.058127 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06"} err="failed to get container status \"64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06\": rpc error: code = NotFound desc = could not find container \"64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06\": container with ID starting with 64137d02f47f111149e18875d742f444f7be4b7f2726da0294a61a9f4cd4aa06 not found: ID does not exist" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.259885 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.259928 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.352288 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.372924 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") pod \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\" (UID: \"5bc8ecfe-e00f-420e-870a-a0f4a9b8678f\") " Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.373555 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" (UID: "5bc8ecfe-e00f-420e-870a-a0f4a9b8678f"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.475026 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.653056 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:45:16 crc kubenswrapper[4909]: I1128 17:45:16.678812 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dfd646bd7-l24xd"] Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.114220 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.192476 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.410192 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 17:45:17 crc kubenswrapper[4909]: E1128 17:45:17.571501 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00e109aa_1028_4d1f_8e3d_525f191e8e35.slice/crio-conmon-345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00e109aa_1028_4d1f_8e3d_525f191e8e35.slice/crio-345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.806810 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.901346 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle\") pod \"00e109aa-1028-4d1f-8e3d-525f191e8e35\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.901400 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data\") pod \"00e109aa-1028-4d1f-8e3d-525f191e8e35\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.901464 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgdk6\" (UniqueName: \"kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6\") pod \"00e109aa-1028-4d1f-8e3d-525f191e8e35\" (UID: \"00e109aa-1028-4d1f-8e3d-525f191e8e35\") " Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.914992 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6" (OuterVolumeSpecName: "kube-api-access-mgdk6") pod "00e109aa-1028-4d1f-8e3d-525f191e8e35" (UID: "00e109aa-1028-4d1f-8e3d-525f191e8e35"). InnerVolumeSpecName "kube-api-access-mgdk6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.919604 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" path="/var/lib/kubelet/pods/5bc8ecfe-e00f-420e-870a-a0f4a9b8678f/volumes" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.928294 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00e109aa-1028-4d1f-8e3d-525f191e8e35" (UID: "00e109aa-1028-4d1f-8e3d-525f191e8e35"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:17 crc kubenswrapper[4909]: I1128 17:45:17.942928 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data" (OuterVolumeSpecName: "config-data") pod "00e109aa-1028-4d1f-8e3d-525f191e8e35" (UID: "00e109aa-1028-4d1f-8e3d-525f191e8e35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.004542 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.004575 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00e109aa-1028-4d1f-8e3d-525f191e8e35-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.004585 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgdk6\" (UniqueName: \"kubernetes.io/projected/00e109aa-1028-4d1f-8e3d-525f191e8e35-kube-api-access-mgdk6\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014064 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-zhtp2"] Nov 28 17:45:18 crc kubenswrapper[4909]: E1128 17:45:18.014527 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00e109aa-1028-4d1f-8e3d-525f191e8e35" containerName="nova-scheduler-scheduler" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014548 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00e109aa-1028-4d1f-8e3d-525f191e8e35" containerName="nova-scheduler-scheduler" Nov 28 17:45:18 crc kubenswrapper[4909]: E1128 17:45:18.014570 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="init" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014578 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="init" Nov 28 17:45:18 crc kubenswrapper[4909]: E1128 17:45:18.014608 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="dnsmasq-dns" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014615 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="dnsmasq-dns" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014864 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00e109aa-1028-4d1f-8e3d-525f191e8e35" containerName="nova-scheduler-scheduler" Nov 28 
17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.014901 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bc8ecfe-e00f-420e-870a-a0f4a9b8678f" containerName="dnsmasq-dns" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.015534 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-zhtp2"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.015624 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.017728 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.018464 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.036849 4909 generic.go:334] "Generic (PLEG): container finished" podID="00e109aa-1028-4d1f-8e3d-525f191e8e35" containerID="345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976" exitCode=0 Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.037711 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.039521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"00e109aa-1028-4d1f-8e3d-525f191e8e35","Type":"ContainerDied","Data":"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976"} Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.039573 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"00e109aa-1028-4d1f-8e3d-525f191e8e35","Type":"ContainerDied","Data":"53bca10a31d4934f5e4ea6ecee767f65b4e814c0c06a82f8aa8e70678a62a9a8"} Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.039593 4909 scope.go:117] "RemoveContainer" containerID="345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.073732 4909 scope.go:117] "RemoveContainer" containerID="345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976" Nov 28 17:45:18 crc kubenswrapper[4909]: E1128 17:45:18.077779 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976\": container with ID starting with 345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976 not found: ID does not exist" containerID="345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.077825 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976"} err="failed to get container status \"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976\": rpc error: code = NotFound desc = could not find container \"345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976\": container with ID starting with 345e013d91b4d5aeef882fb06e871f6c43b7f3f560a8a2fbc22d69c9c7265976 not found: ID does not exist" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.084616 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.096052 
4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.105560 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.105642 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9fw6\" (UniqueName: \"kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.105700 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.105742 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.106021 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.107304 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.109703 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.119107 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207317 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207699 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9fw6\" (UniqueName: \"kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207781 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf4g8\" (UniqueName: \"kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207808 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207830 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207873 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.207894 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.212755 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.214234 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.225921 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.231313 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9fw6\" (UniqueName: \"kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6\") pod \"nova-cell1-cell-mapping-zhtp2\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.309697 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.309941 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf4g8\" (UniqueName: \"kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.309978 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.313490 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.314316 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.328224 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf4g8\" (UniqueName: \"kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8\") pod \"nova-scheduler-0\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.335713 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.433425 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.577800 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-zhtp2"] Nov 28 17:45:18 crc kubenswrapper[4909]: I1128 17:45:18.890978 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:18 crc kubenswrapper[4909]: W1128 17:45:18.897767 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cb9588c_7bab_435a_b005_4080895a8b56.slice/crio-241e073887db79436e7738bcf247c7ad733405b62bea595ea9fe5a43e86cdc00 WatchSource:0}: Error finding container 241e073887db79436e7738bcf247c7ad733405b62bea595ea9fe5a43e86cdc00: Status 404 returned error can't find the container with id 241e073887db79436e7738bcf247c7ad733405b62bea595ea9fe5a43e86cdc00 Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.050160 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-zhtp2" event={"ID":"32b73e05-07dd-49d0-8b6d-6e2a7258a66b","Type":"ContainerStarted","Data":"ee678844165960f867f4193cc477f1d7f98e4bc318cf99967e273c736e61d5a4"} Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.050546 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-zhtp2" event={"ID":"32b73e05-07dd-49d0-8b6d-6e2a7258a66b","Type":"ContainerStarted","Data":"3b1ab2d3dce8d5a8b768549f16fb5d874faa7a77c027ec9f60c72fef73316ecb"} Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.057442 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7fc2v" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="registry-server" containerID="cri-o://e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c" gracePeriod=2 Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.057964 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7cb9588c-7bab-435a-b005-4080895a8b56","Type":"ContainerStarted","Data":"241e073887db79436e7738bcf247c7ad733405b62bea595ea9fe5a43e86cdc00"} Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.065871 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-zhtp2" podStartSLOduration=2.065857707 podStartE2EDuration="2.065857707s" podCreationTimestamp="2025-11-28 17:45:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:19.063183675 +0000 UTC m=+5701.459868199" watchObservedRunningTime="2025-11-28 17:45:19.065857707 +0000 UTC m=+5701.462542221" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.387696 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.388732 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.600343 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.630854 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cr2s\" (UniqueName: \"kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s\") pod \"5b8f57fb-be47-431e-b192-3f6407348089\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.630930 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content\") pod \"5b8f57fb-be47-431e-b192-3f6407348089\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.630962 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities\") pod \"5b8f57fb-be47-431e-b192-3f6407348089\" (UID: \"5b8f57fb-be47-431e-b192-3f6407348089\") " Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.632352 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities" (OuterVolumeSpecName: "utilities") pod "5b8f57fb-be47-431e-b192-3f6407348089" (UID: "5b8f57fb-be47-431e-b192-3f6407348089"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.642191 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s" (OuterVolumeSpecName: "kube-api-access-4cr2s") pod "5b8f57fb-be47-431e-b192-3f6407348089" (UID: "5b8f57fb-be47-431e-b192-3f6407348089"). InnerVolumeSpecName "kube-api-access-4cr2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.655109 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b8f57fb-be47-431e-b192-3f6407348089" (UID: "5b8f57fb-be47-431e-b192-3f6407348089"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.733129 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cr2s\" (UniqueName: \"kubernetes.io/projected/5b8f57fb-be47-431e-b192-3f6407348089-kube-api-access-4cr2s\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.733154 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.733164 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b8f57fb-be47-431e-b192-3f6407348089-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:19 crc kubenswrapper[4909]: I1128 17:45:19.913921 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00e109aa-1028-4d1f-8e3d-525f191e8e35" path="/var/lib/kubelet/pods/00e109aa-1028-4d1f-8e3d-525f191e8e35/volumes" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.067583 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7cb9588c-7bab-435a-b005-4080895a8b56","Type":"ContainerStarted","Data":"b3cd5abbe185bcd978a6c05f374211e4e8613e08e83e75dd55991df288e6d4ae"} Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.069957 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b8f57fb-be47-431e-b192-3f6407348089" containerID="e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c" exitCode=0 Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.070022 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7fc2v" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.070043 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerDied","Data":"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c"} Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.070086 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7fc2v" event={"ID":"5b8f57fb-be47-431e-b192-3f6407348089","Type":"ContainerDied","Data":"633f01267d0a9f6e2220f3a80b97347de861707956ff7c9227371fec7c3e86cd"} Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.070103 4909 scope.go:117] "RemoveContainer" containerID="e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.095027 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.095002914 podStartE2EDuration="2.095002914s" podCreationTimestamp="2025-11-28 17:45:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:20.089488786 +0000 UTC m=+5702.486173310" watchObservedRunningTime="2025-11-28 17:45:20.095002914 +0000 UTC m=+5702.491687438" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.104053 4909 scope.go:117] "RemoveContainer" containerID="8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.115020 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.125196 4909 scope.go:117] "RemoveContainer" containerID="58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.129426 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7fc2v"] Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.160642 4909 scope.go:117] "RemoveContainer" containerID="e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c" Nov 28 17:45:20 crc kubenswrapper[4909]: E1128 17:45:20.161072 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c\": container with ID starting with e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c not found: ID does not exist" containerID="e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.161113 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c"} err="failed to get container status \"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c\": rpc error: code = NotFound desc = could not find container \"e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c\": container with ID starting with e4ec2484d405d188588ae6893edfde80e6691a6563ddd49185b54dba2787596c not found: ID does not exist" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.161136 4909 scope.go:117] "RemoveContainer" 
containerID="8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2" Nov 28 17:45:20 crc kubenswrapper[4909]: E1128 17:45:20.161509 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2\": container with ID starting with 8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2 not found: ID does not exist" containerID="8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.161539 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2"} err="failed to get container status \"8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2\": rpc error: code = NotFound desc = could not find container \"8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2\": container with ID starting with 8e5a89165de63841bc9b62c5633dcb93882df33e5caf518dd9e884ff850ef8d2 not found: ID does not exist" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.161560 4909 scope.go:117] "RemoveContainer" containerID="58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924" Nov 28 17:45:20 crc kubenswrapper[4909]: E1128 17:45:20.161881 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924\": container with ID starting with 58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924 not found: ID does not exist" containerID="58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924" Nov 28 17:45:20 crc kubenswrapper[4909]: I1128 17:45:20.161902 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924"} err="failed to get container status \"58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924\": rpc error: code = NotFound desc = could not find container \"58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924\": container with ID starting with 58e97e8bead349de977cf00ae7a4e0e877221a4086c8ee9fd6071850c62fd924 not found: ID does not exist" Nov 28 17:45:21 crc kubenswrapper[4909]: I1128 17:45:21.924326 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b8f57fb-be47-431e-b192-3f6407348089" path="/var/lib/kubelet/pods/5b8f57fb-be47-431e-b192-3f6407348089/volumes" Nov 28 17:45:23 crc kubenswrapper[4909]: I1128 17:45:23.434854 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 17:45:24 crc kubenswrapper[4909]: I1128 17:45:24.211666 4909 generic.go:334] "Generic (PLEG): container finished" podID="32b73e05-07dd-49d0-8b6d-6e2a7258a66b" containerID="ee678844165960f867f4193cc477f1d7f98e4bc318cf99967e273c736e61d5a4" exitCode=0 Nov 28 17:45:24 crc kubenswrapper[4909]: I1128 17:45:24.211725 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-zhtp2" event={"ID":"32b73e05-07dd-49d0-8b6d-6e2a7258a66b","Type":"ContainerDied","Data":"ee678844165960f867f4193cc477f1d7f98e4bc318cf99967e273c736e61d5a4"} Nov 28 17:45:24 crc kubenswrapper[4909]: I1128 17:45:24.416554 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 17:45:24 crc 
kubenswrapper[4909]: I1128 17:45:24.416597 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 17:45:24 crc kubenswrapper[4909]: I1128 17:45:24.688700 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 17:45:24 crc kubenswrapper[4909]: I1128 17:45:24.688754 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.504780 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.68:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.504780 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.68:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.597638 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.706413 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle\") pod \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.706509 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts\") pod \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.706589 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data\") pod \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.706709 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9fw6\" (UniqueName: \"kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6\") pod \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\" (UID: \"32b73e05-07dd-49d0-8b6d-6e2a7258a66b\") " Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.712369 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts" (OuterVolumeSpecName: "scripts") pod "32b73e05-07dd-49d0-8b6d-6e2a7258a66b" (UID: "32b73e05-07dd-49d0-8b6d-6e2a7258a66b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.712519 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6" (OuterVolumeSpecName: "kube-api-access-b9fw6") pod "32b73e05-07dd-49d0-8b6d-6e2a7258a66b" (UID: "32b73e05-07dd-49d0-8b6d-6e2a7258a66b"). InnerVolumeSpecName "kube-api-access-b9fw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.765906 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32b73e05-07dd-49d0-8b6d-6e2a7258a66b" (UID: "32b73e05-07dd-49d0-8b6d-6e2a7258a66b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.768780 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data" (OuterVolumeSpecName: "config-data") pod "32b73e05-07dd-49d0-8b6d-6e2a7258a66b" (UID: "32b73e05-07dd-49d0-8b6d-6e2a7258a66b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.770854 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.69:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.771198 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.69:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.808258 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.808301 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9fw6\" (UniqueName: \"kubernetes.io/projected/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-kube-api-access-b9fw6\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.808333 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:25 crc kubenswrapper[4909]: I1128 17:45:25.808346 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32b73e05-07dd-49d0-8b6d-6e2a7258a66b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.228522 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-zhtp2" event={"ID":"32b73e05-07dd-49d0-8b6d-6e2a7258a66b","Type":"ContainerDied","Data":"3b1ab2d3dce8d5a8b768549f16fb5d874faa7a77c027ec9f60c72fef73316ecb"} Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.228567 
4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b1ab2d3dce8d5a8b768549f16fb5d874faa7a77c027ec9f60c72fef73316ecb" Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.228568 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-zhtp2" Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.420990 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.421187 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-log" containerID="cri-o://5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d" gracePeriod=30 Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.421561 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-api" containerID="cri-o://332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1" gracePeriod=30 Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.442079 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.442360 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7cb9588c-7bab-435a-b005-4080895a8b56" containerName="nova-scheduler-scheduler" containerID="cri-o://b3cd5abbe185bcd978a6c05f374211e4e8613e08e83e75dd55991df288e6d4ae" gracePeriod=30 Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.464539 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.464820 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-log" containerID="cri-o://1e8f253edf2feb8a9ef876c0d68b5f2e2e9898587f99da9553788231dc4d87a9" gracePeriod=30 Nov 28 17:45:26 crc kubenswrapper[4909]: I1128 17:45:26.464962 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-metadata" containerID="cri-o://5d546ecbf4e9f4c6725f9278cd75bda9b2bf6f3321d224afd07055c3c9076f2f" gracePeriod=30 Nov 28 17:45:27 crc kubenswrapper[4909]: I1128 17:45:27.256238 4909 generic.go:334] "Generic (PLEG): container finished" podID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerID="5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d" exitCode=143 Nov 28 17:45:27 crc kubenswrapper[4909]: I1128 17:45:27.256339 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerDied","Data":"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d"} Nov 28 17:45:27 crc kubenswrapper[4909]: I1128 17:45:27.260733 4909 generic.go:334] "Generic (PLEG): container finished" podID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerID="1e8f253edf2feb8a9ef876c0d68b5f2e2e9898587f99da9553788231dc4d87a9" exitCode=143 Nov 28 17:45:27 crc kubenswrapper[4909]: I1128 17:45:27.260769 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerDied","Data":"1e8f253edf2feb8a9ef876c0d68b5f2e2e9898587f99da9553788231dc4d87a9"} Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.296312 4909 generic.go:334] "Generic (PLEG): container finished" podID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerID="5d546ecbf4e9f4c6725f9278cd75bda9b2bf6f3321d224afd07055c3c9076f2f" exitCode=0 Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.297020 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerDied","Data":"5d546ecbf4e9f4c6725f9278cd75bda9b2bf6f3321d224afd07055c3c9076f2f"} Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.297055 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8487cde4-87ee-4dcb-b4b1-ac47ca899154","Type":"ContainerDied","Data":"7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710"} Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.297069 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b9ff197460cd4b7740b673949321b863c5ead6579f5fdc5d32e026f80724710" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.298424 4909 generic.go:334] "Generic (PLEG): container finished" podID="7cb9588c-7bab-435a-b005-4080895a8b56" containerID="b3cd5abbe185bcd978a6c05f374211e4e8613e08e83e75dd55991df288e6d4ae" exitCode=0 Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.298446 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7cb9588c-7bab-435a-b005-4080895a8b56","Type":"ContainerDied","Data":"b3cd5abbe185bcd978a6c05f374211e4e8613e08e83e75dd55991df288e6d4ae"} Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.299123 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.391277 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vz5d\" (UniqueName: \"kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d\") pod \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.391446 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs\") pod \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.391497 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data\") pod \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.391756 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle\") pod \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\" (UID: \"8487cde4-87ee-4dcb-b4b1-ac47ca899154\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.392462 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs" (OuterVolumeSpecName: "logs") pod "8487cde4-87ee-4dcb-b4b1-ac47ca899154" (UID: "8487cde4-87ee-4dcb-b4b1-ac47ca899154"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.401467 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d" (OuterVolumeSpecName: "kube-api-access-8vz5d") pod "8487cde4-87ee-4dcb-b4b1-ac47ca899154" (UID: "8487cde4-87ee-4dcb-b4b1-ac47ca899154"). InnerVolumeSpecName "kube-api-access-8vz5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.419003 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data" (OuterVolumeSpecName: "config-data") pod "8487cde4-87ee-4dcb-b4b1-ac47ca899154" (UID: "8487cde4-87ee-4dcb-b4b1-ac47ca899154"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.419849 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8487cde4-87ee-4dcb-b4b1-ac47ca899154" (UID: "8487cde4-87ee-4dcb-b4b1-ac47ca899154"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.454455 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.493864 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.493899 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vz5d\" (UniqueName: \"kubernetes.io/projected/8487cde4-87ee-4dcb-b4b1-ac47ca899154-kube-api-access-8vz5d\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.493911 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8487cde4-87ee-4dcb-b4b1-ac47ca899154-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.493920 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8487cde4-87ee-4dcb-b4b1-ac47ca899154-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.595546 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle\") pod \"7cb9588c-7bab-435a-b005-4080895a8b56\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.596338 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf4g8\" (UniqueName: \"kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8\") pod \"7cb9588c-7bab-435a-b005-4080895a8b56\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.596562 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data\") pod \"7cb9588c-7bab-435a-b005-4080895a8b56\" (UID: \"7cb9588c-7bab-435a-b005-4080895a8b56\") " Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.600067 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8" (OuterVolumeSpecName: "kube-api-access-kf4g8") pod "7cb9588c-7bab-435a-b005-4080895a8b56" (UID: "7cb9588c-7bab-435a-b005-4080895a8b56"). InnerVolumeSpecName "kube-api-access-kf4g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.619038 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data" (OuterVolumeSpecName: "config-data") pod "7cb9588c-7bab-435a-b005-4080895a8b56" (UID: "7cb9588c-7bab-435a-b005-4080895a8b56"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.620123 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7cb9588c-7bab-435a-b005-4080895a8b56" (UID: "7cb9588c-7bab-435a-b005-4080895a8b56"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.699358 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.699390 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf4g8\" (UniqueName: \"kubernetes.io/projected/7cb9588c-7bab-435a-b005-4080895a8b56-kube-api-access-kf4g8\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:30 crc kubenswrapper[4909]: I1128 17:45:30.699406 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cb9588c-7bab-435a-b005-4080895a8b56-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.283768 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.311730 4909 generic.go:334] "Generic (PLEG): container finished" podID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerID="332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1" exitCode=0 Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.311783 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.311818 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerDied","Data":"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1"} Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.311862 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7deadec1-ac91-4d6d-9667-a91148b3fd3d","Type":"ContainerDied","Data":"4dda0e9cf1bb76c1700ce7aaba5eb0749997a2502f0c0f4fe99161ae8488512d"} Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.311882 4909 scope.go:117] "RemoveContainer" containerID="332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.316842 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.316837 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7cb9588c-7bab-435a-b005-4080895a8b56","Type":"ContainerDied","Data":"241e073887db79436e7738bcf247c7ad733405b62bea595ea9fe5a43e86cdc00"} Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.316842 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.344325 4909 scope.go:117] "RemoveContainer" containerID="5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.348094 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.362625 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.392899 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.399883 4909 scope.go:117] "RemoveContainer" containerID="332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.400460 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1\": container with ID starting with 332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1 not found: ID does not exist" containerID="332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.400492 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1"} err="failed to get container status \"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1\": rpc error: code = NotFound desc = could not find container \"332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1\": container with ID starting with 332f5504289f8a9ed6334ea0d5d7cc532713d992626e1650b36228985ab7eff1 not found: ID does not exist" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.400511 4909 scope.go:117] "RemoveContainer" containerID="5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.401314 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d\": container with ID starting with 5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d not found: ID does not exist" containerID="5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.401338 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d"} err="failed to get container status \"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d\": rpc error: code = NotFound desc = could not find container \"5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d\": container with ID starting with 5d831a15b6468c23b481d4a1a3d345a6dde06a32d87c71d92a8efdc88f497d7d not found: ID does not exist" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.401351 4909 scope.go:117] "RemoveContainer" containerID="b3cd5abbe185bcd978a6c05f374211e4e8613e08e83e75dd55991df288e6d4ae" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.406583 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 
17:45:31.410231 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data\") pod \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.410291 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle\") pod \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.410335 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vwbs\" (UniqueName: \"kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs\") pod \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.410407 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs\") pod \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\" (UID: \"7deadec1-ac91-4d6d-9667-a91148b3fd3d\") " Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.411112 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs" (OuterVolumeSpecName: "logs") pod "7deadec1-ac91-4d6d-9667-a91148b3fd3d" (UID: "7deadec1-ac91-4d6d-9667-a91148b3fd3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.423857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs" (OuterVolumeSpecName: "kube-api-access-2vwbs") pod "7deadec1-ac91-4d6d-9667-a91148b3fd3d" (UID: "7deadec1-ac91-4d6d-9667-a91148b3fd3d"). InnerVolumeSpecName "kube-api-access-2vwbs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426215 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426808 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="registry-server" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426843 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="registry-server" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426872 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-api" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426878 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-api" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426891 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-log" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426897 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-log" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426925 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="extract-utilities" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426931 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="extract-utilities" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426946 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-metadata" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426952 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-metadata" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426963 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32b73e05-07dd-49d0-8b6d-6e2a7258a66b" containerName="nova-manage" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.426971 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="32b73e05-07dd-49d0-8b6d-6e2a7258a66b" containerName="nova-manage" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.426979 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb9588c-7bab-435a-b005-4080895a8b56" containerName="nova-scheduler-scheduler" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427004 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb9588c-7bab-435a-b005-4080895a8b56" containerName="nova-scheduler-scheduler" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.427016 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="extract-content" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427023 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="extract-content" Nov 28 17:45:31 crc kubenswrapper[4909]: E1128 17:45:31.427038 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-log" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427044 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-log" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427267 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cb9588c-7bab-435a-b005-4080895a8b56" containerName="nova-scheduler-scheduler" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427280 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="32b73e05-07dd-49d0-8b6d-6e2a7258a66b" containerName="nova-manage" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427294 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-api" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427327 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" containerName="nova-api-log" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427341 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-metadata" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427348 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b8f57fb-be47-431e-b192-3f6407348089" containerName="registry-server" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.427360 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" containerName="nova-metadata-log" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.428545 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.430979 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.432291 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7deadec1-ac91-4d6d-9667-a91148b3fd3d" (UID: "7deadec1-ac91-4d6d-9667-a91148b3fd3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.437341 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.450013 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.451329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data" (OuterVolumeSpecName: "config-data") pod "7deadec1-ac91-4d6d-9667-a91148b3fd3d" (UID: "7deadec1-ac91-4d6d-9667-a91148b3fd3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.451458 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.453497 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.465482 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.511793 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512213 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512262 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512301 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512329 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkqs9\" (UniqueName: \"kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512357 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh4cb\" (UniqueName: \"kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512797 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7deadec1-ac91-4d6d-9667-a91148b3fd3d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512813 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-config-data\") on node \"crc\" 
DevicePath \"\"" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512827 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7deadec1-ac91-4d6d-9667-a91148b3fd3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.512840 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vwbs\" (UniqueName: \"kubernetes.io/projected/7deadec1-ac91-4d6d-9667-a91148b3fd3d-kube-api-access-2vwbs\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614529 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614583 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkqs9\" (UniqueName: \"kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614615 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614649 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh4cb\" (UniqueName: \"kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614726 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614751 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.614781 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.615759 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.619649 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.619721 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.619733 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.620060 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.629419 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkqs9\" (UniqueName: \"kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9\") pod \"nova-metadata-0\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " pod="openstack/nova-metadata-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.629520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh4cb\" (UniqueName: \"kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb\") pod \"nova-scheduler-0\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") " pod="openstack/nova-scheduler-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.658765 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.673023 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.691881 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.694556 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.696748 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.717781 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.818360 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.818773 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.818809 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.818901 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.818970 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjgs6\" (UniqueName: \"kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.825352 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.916293 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cb9588c-7bab-435a-b005-4080895a8b56" path="/var/lib/kubelet/pods/7cb9588c-7bab-435a-b005-4080895a8b56/volumes"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.916999 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7deadec1-ac91-4d6d-9667-a91148b3fd3d" path="/var/lib/kubelet/pods/7deadec1-ac91-4d6d-9667-a91148b3fd3d/volumes"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.917596 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8487cde4-87ee-4dcb-b4b1-ac47ca899154" path="/var/lib/kubelet/pods/8487cde4-87ee-4dcb-b4b1-ac47ca899154/volumes"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.919952 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.919996 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.920052 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.920091 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjgs6\" (UniqueName: \"kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.920715 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.925070 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.926650 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:31 crc kubenswrapper[4909]: I1128 17:45:31.948225 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjgs6\" (UniqueName: \"kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6\") pod \"nova-api-0\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " pod="openstack/nova-api-0"
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.015459 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
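
The entries above show the volume manager's two-phase handling of nova-api-0's four volumes: VerifyControllerAttachedVolume confirms each volume from the desired state of the world, then MountVolume runs and the operation generator logs MountVolume.SetUp succeeded. A minimal Go sketch of that desired-vs-actual reconcile pattern, with made-up types rather than the kubelet's real ones:

```go
// Illustrative sketch only -- not the kubelet's actual volume manager.
// It mirrors the pattern visible in the log: compare the desired set of
// volumes for a pod against what is actually mounted, and run the
// attach check plus mount for anything still missing.
package main

import "fmt"

type volume struct{ name, plugin string }

func reconcile(desired []volume, mounted map[string]bool) {
	for _, v := range desired {
		if mounted[v.name] {
			continue // already in the actual state of the world
		}
		// Phase 1: confirm the volume is attached to the node
		// (the "VerifyControllerAttachedVolume started" entries).
		fmt.Printf("verify attached: %s (%s)\n", v.name, v.plugin)
		// Phase 2: mount it into the pod's volume directory
		// (the "MountVolume started" / "SetUp succeeded" entries).
		fmt.Printf("mount + setup: %s\n", v.name)
		mounted[v.name] = true
	}
}

func main() {
	// Volume names and plugins taken from the nova-api-0 entries above.
	desired := []volume{
		{"combined-ca-bundle", "kubernetes.io/secret"},
		{"config-data", "kubernetes.io/secret"},
		{"logs", "kubernetes.io/empty-dir"},
		{"kube-api-access-tjgs6", "kubernetes.io/projected"},
	}
	reconcile(desired, map[string]bool{})
}
```
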
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.203905 4909 scope.go:117] "RemoveContainer" containerID="0e7ce8997695d0e3fa03e38fcaa1e8558e1d78da8ccd96fdbce3fee63f52c05b"
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.226217 4909 scope.go:117] "RemoveContainer" containerID="5616564468cc67b3105708c4adb7fdef9c93a9b70402c2c83d2e7f88e6f9aafb"
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.380406 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.466720 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:45:32 crc kubenswrapper[4909]: W1128 17:45:32.506751 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod917bd8d7_4c4a_43ae_a1e8_72ea0c6cffad.slice/crio-701d0b86ef676cb389e70b9398915868e80da37162a150fb8ae2d4e7f3bb13fe WatchSource:0}: Error finding container 701d0b86ef676cb389e70b9398915868e80da37162a150fb8ae2d4e7f3bb13fe: Status 404 returned error can't find the container with id 701d0b86ef676cb389e70b9398915868e80da37162a150fb8ae2d4e7f3bb13fe
Nov 28 17:45:32 crc kubenswrapper[4909]: I1128 17:45:32.562510 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 17:45:32 crc kubenswrapper[4909]: W1128 17:45:32.576522 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b676cf3_d4d3_4e1d_9ce3_eb7b4c871bd8.slice/crio-e4fe20d9034181485f10a28fda1555d95bdaec65bbe1199bc1fb0465af36932f WatchSource:0}: Error finding container e4fe20d9034181485f10a28fda1555d95bdaec65bbe1199bc1fb0465af36932f: Status 404 returned error can't find the container with id e4fe20d9034181485f10a28fda1555d95bdaec65bbe1199bc1fb0465af36932f
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.516843 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerStarted","Data":"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.517634 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerStarted","Data":"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.517668 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerStarted","Data":"31c32b751b99e761b1c36d8a0ce02ef1a30466818b13d367d656f78ab4d62cac"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.522523 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad","Type":"ContainerStarted","Data":"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.522569 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad","Type":"ContainerStarted","Data":"701d0b86ef676cb389e70b9398915868e80da37162a150fb8ae2d4e7f3bb13fe"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.526936 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerStarted","Data":"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.526983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerStarted","Data":"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.526992 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerStarted","Data":"e4fe20d9034181485f10a28fda1555d95bdaec65bbe1199bc1fb0465af36932f"}
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.547573 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.547553402 podStartE2EDuration="2.547553402s" podCreationTimestamp="2025-11-28 17:45:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:33.536095124 +0000 UTC m=+5715.932779648" watchObservedRunningTime="2025-11-28 17:45:33.547553402 +0000 UTC m=+5715.944237926"
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.563350 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.5633329270000003 podStartE2EDuration="2.563332927s" podCreationTimestamp="2025-11-28 17:45:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:33.554099238 +0000 UTC m=+5715.950783762" watchObservedRunningTime="2025-11-28 17:45:33.563332927 +0000 UTC m=+5715.960017441"
Nov 28 17:45:33 crc kubenswrapper[4909]: I1128 17:45:33.576096 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.576071689 podStartE2EDuration="2.576071689s" podCreationTimestamp="2025-11-28 17:45:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:33.570563631 +0000 UTC m=+5715.967248165" watchObservedRunningTime="2025-11-28 17:45:33.576071689 +0000 UTC m=+5715.972756223"
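
For each pod the startup-latency tracker prints a podStartSLOduration; in the entries above the value matches watchObservedRunningTime minus podCreationTimestamp (2.547553402s for nova-metadata-0). A small sketch reproducing the arithmetic from the logged timestamps; the Go layout string is an assumption chosen to match the log's timestamp format:

```go
// Sketch: recompute podStartSLOduration from the two timestamps logged
// for nova-metadata-0 above. Prints 2.547553402s.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-28 17:45:31 +0000 UTC")
	observed, _ := time.Parse(layout, "2025-11-28 17:45:33.547553402 +0000 UTC")
	fmt.Println(observed.Sub(created)) // duration the tracker reports
}
```
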
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 17:45:42 crc kubenswrapper[4909]: I1128 17:45:42.015993 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 17:45:42 crc kubenswrapper[4909]: I1128 17:45:42.016043 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 17:45:42 crc kubenswrapper[4909]: I1128 17:45:42.645970 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 17:45:42 crc kubenswrapper[4909]: I1128 17:45:42.902842 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.72:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:42 crc kubenswrapper[4909]: I1128 17:45:42.902853 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.72:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:43 crc kubenswrapper[4909]: I1128 17:45:43.057874 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.74:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:43 crc kubenswrapper[4909]: I1128 17:45:43.099850 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.74:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 17:45:49 crc kubenswrapper[4909]: I1128 17:45:49.910779 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:45:49 crc kubenswrapper[4909]: I1128 17:45:49.911595 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.821319 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.821688 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.824084 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.824217 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.024520 
Nov 28 17:45:49 crc kubenswrapper[4909]: I1128 17:45:49.910779 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:45:49 crc kubenswrapper[4909]: I1128 17:45:49.911595 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.821319 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.821688 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.824084 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 17:45:51 crc kubenswrapper[4909]: I1128 17:45:51.824217 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.024520 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.025110 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.029906 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.036294 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.707385 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.711880 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.932650 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"]
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.934614 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:52 crc kubenswrapper[4909]: I1128 17:45:52.974399 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"]
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.032551 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whw5z\" (UniqueName: \"kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.032750 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.032791 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.032814 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.032914 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.134616 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whw5z\" (UniqueName: \"kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.134754 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.134783 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.134803 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.134858 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.135973 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.135973 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.136216 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.136683 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.154516 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whw5z\" (UniqueName: \"kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z\") pod \"dnsmasq-dns-55cd5466f5-dkfbz\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.264511 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:53 crc kubenswrapper[4909]: I1128 17:45:53.765913 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"]
Nov 28 17:45:54 crc kubenswrapper[4909]: I1128 17:45:54.727632 4909 generic.go:334] "Generic (PLEG): container finished" podID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerID="13fca75e8769f3a78135339df5c858acca273e77867c355c34dbcec6d659d34d" exitCode=0
Nov 28 17:45:54 crc kubenswrapper[4909]: I1128 17:45:54.731597 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" event={"ID":"b8edd8a1-86bd-4299-8585-d36cabfbf0a6","Type":"ContainerDied","Data":"13fca75e8769f3a78135339df5c858acca273e77867c355c34dbcec6d659d34d"}
Nov 28 17:45:54 crc kubenswrapper[4909]: I1128 17:45:54.733401 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" event={"ID":"b8edd8a1-86bd-4299-8585-d36cabfbf0a6","Type":"ContainerStarted","Data":"9cbb9f187d371ff89702e2f90be80c8a6de47f6e11fa141175cacae30a95b047"}
Nov 28 17:45:55 crc kubenswrapper[4909]: I1128 17:45:55.742452 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" event={"ID":"b8edd8a1-86bd-4299-8585-d36cabfbf0a6","Type":"ContainerStarted","Data":"a29c9a1d1677f1d575ccc20983a118c3ee21826435a95d9fc205818e6e15ffbb"}
Nov 28 17:45:55 crc kubenswrapper[4909]: I1128 17:45:55.742998 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:45:55 crc kubenswrapper[4909]: I1128 17:45:55.776624 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" podStartSLOduration=3.776601451 podStartE2EDuration="3.776601451s" podCreationTimestamp="2025-11-28 17:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:45:55.763144979 +0000 UTC m=+5738.159829513" watchObservedRunningTime="2025-11-28 17:45:55.776601451 +0000 UTC m=+5738.173285995"
Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.266984 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz"
Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.363081 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"]
Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.363729 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns" containerID="cri-o://dde5f87023b06fc0c065d4ecdc2440ef61ee2578afc1e009d913580c4fba117e" gracePeriod=10
Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.831045 4909 generic.go:334] "Generic (PLEG): container finished" podID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerID="dde5f87023b06fc0c065d4ecdc2440ef61ee2578afc1e009d913580c4fba117e" exitCode=0
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" event={"ID":"b832fa8f-af08-4832-9f7e-a9d921aab4be","Type":"ContainerDied","Data":"dde5f87023b06fc0c065d4ecdc2440ef61ee2578afc1e009d913580c4fba117e"} Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.831109 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" event={"ID":"b832fa8f-af08-4832-9f7e-a9d921aab4be","Type":"ContainerDied","Data":"1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a"} Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.831119 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1edf90364ba494026fafff04176c86830b97b4856516537e8defc247b8c7f21a" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.860642 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.868168 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb\") pod \"b832fa8f-af08-4832-9f7e-a9d921aab4be\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.868213 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb\") pod \"b832fa8f-af08-4832-9f7e-a9d921aab4be\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.868258 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc\") pod \"b832fa8f-af08-4832-9f7e-a9d921aab4be\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.868277 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xt4wq\" (UniqueName: \"kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq\") pod \"b832fa8f-af08-4832-9f7e-a9d921aab4be\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.868359 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config\") pod \"b832fa8f-af08-4832-9f7e-a9d921aab4be\" (UID: \"b832fa8f-af08-4832-9f7e-a9d921aab4be\") " Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.874186 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq" (OuterVolumeSpecName: "kube-api-access-xt4wq") pod "b832fa8f-af08-4832-9f7e-a9d921aab4be" (UID: "b832fa8f-af08-4832-9f7e-a9d921aab4be"). InnerVolumeSpecName "kube-api-access-xt4wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.928329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config" (OuterVolumeSpecName: "config") pod "b832fa8f-af08-4832-9f7e-a9d921aab4be" (UID: "b832fa8f-af08-4832-9f7e-a9d921aab4be"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.935099 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b832fa8f-af08-4832-9f7e-a9d921aab4be" (UID: "b832fa8f-af08-4832-9f7e-a9d921aab4be"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.942436 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b832fa8f-af08-4832-9f7e-a9d921aab4be" (UID: "b832fa8f-af08-4832-9f7e-a9d921aab4be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.946826 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b832fa8f-af08-4832-9f7e-a9d921aab4be" (UID: "b832fa8f-af08-4832-9f7e-a9d921aab4be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.970062 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.970097 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.970110 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.970122 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b832fa8f-af08-4832-9f7e-a9d921aab4be-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:03 crc kubenswrapper[4909]: I1128 17:46:03.970135 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xt4wq\" (UniqueName: \"kubernetes.io/projected/b832fa8f-af08-4832-9f7e-a9d921aab4be-kube-api-access-xt4wq\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:04 crc kubenswrapper[4909]: I1128 17:46:04.839049 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8d567c77-w7dlm" Nov 28 17:46:04 crc kubenswrapper[4909]: I1128 17:46:04.884395 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"] Nov 28 17:46:04 crc kubenswrapper[4909]: I1128 17:46:04.893799 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b8d567c77-w7dlm"] Nov 28 17:46:05 crc kubenswrapper[4909]: I1128 17:46:05.922095 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" path="/var/lib/kubelet/pods/b832fa8f-af08-4832-9f7e-a9d921aab4be/volumes" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.396844 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-vlvbk"] Nov 28 17:46:06 crc kubenswrapper[4909]: E1128 17:46:06.397583 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397604 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns" Nov 28 17:46:06 crc kubenswrapper[4909]: E1128 17:46:06.397638 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="init" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397648 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="init" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397905 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.398727 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlvbk" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.409365 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vlvbk"] Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.417734 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.417812 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk" Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.499742 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"] Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.501281 4909 util.go:30] "No sandbox for pod can be found. 
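
"Cleaned up orphaned pod volumes dir" marks the final step after REMOVE: once every volume under /var/lib/kubelet/pods/<uid>/volumes has been torn down, the now-empty directory can be deleted. An illustrative guard-then-remove sketch; the root path in main is hypothetical, and the real cleanup also walks per-plugin subdirectories before removing anything:

```go
// Sketch of the orphaned-volume cleanup reported above: refuse to
// delete the pod's volumes dir while anything is still present in it.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cleanupOrphan(root, podUID string) error {
	dir := filepath.Join(root, podUID, "volumes")
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	if len(entries) > 0 {
		return fmt.Errorf("volumes still present under %s, skipping", dir)
	}
	return os.Remove(dir) // safe: directory is empty
}

func main() {
	// Hypothetical root for the example; the log uses /var/lib/kubelet/pods.
	if err := cleanupOrphan("/tmp/kubelet-pods", "b832fa8f-af08-4832-9f7e-a9d921aab4be"); err != nil {
		fmt.Println("cleanup:", err)
	}
}
```
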
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.396844 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-vlvbk"]
Nov 28 17:46:06 crc kubenswrapper[4909]: E1128 17:46:06.397583 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397604 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns"
Nov 28 17:46:06 crc kubenswrapper[4909]: E1128 17:46:06.397638 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="init"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397648 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="init"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.397905 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b832fa8f-af08-4832-9f7e-a9d921aab4be" containerName="dnsmasq-dns"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.398727 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.409365 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vlvbk"]
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.417734 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.417812 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.499742 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"]
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.501281 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.504102 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.510435 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"]
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.519509 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.519566 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.519600 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hqgq\" (UniqueName: \"kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.519657 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.520639 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.538875 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") pod \"cinder-db-create-vlvbk\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") " pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.621601 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.621758 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hqgq\" (UniqueName: \"kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.623036 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.640574 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hqgq\" (UniqueName: \"kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq\") pod \"cinder-4ece-account-create-update-8gc9p\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") " pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.767109 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:06 crc kubenswrapper[4909]: I1128 17:46:06.818613 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.252433 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vlvbk"]
Nov 28 17:46:07 crc kubenswrapper[4909]: W1128 17:46:07.255290 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded18cc94_ace5_4bf8_a055_dd864ec5e5c8.slice/crio-974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9 WatchSource:0}: Error finding container 974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9: Status 404 returned error can't find the container with id 974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.396932 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"]
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.879119 4909 generic.go:334] "Generic (PLEG): container finished" podID="c13ce55f-0d0a-4e2f-adcc-98b7829703a4" containerID="d920b9a243174ad11b0fa447095605f8c457c29b2ed5908c15d82acc47d5f07f" exitCode=0
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.879229 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4ece-account-create-update-8gc9p" event={"ID":"c13ce55f-0d0a-4e2f-adcc-98b7829703a4","Type":"ContainerDied","Data":"d920b9a243174ad11b0fa447095605f8c457c29b2ed5908c15d82acc47d5f07f"}
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.879708 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4ece-account-create-update-8gc9p" event={"ID":"c13ce55f-0d0a-4e2f-adcc-98b7829703a4","Type":"ContainerStarted","Data":"000aeb39a93609eeefe04072ee23a8be16ab1cff8bca1ed64d13938aa5d1c402"}
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.881828 4909 generic.go:334] "Generic (PLEG): container finished" podID="ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" containerID="ef877dfe87c9b17c27387bc59b869068bf1fd5496a499fbb12b09e82056e5efb" exitCode=0
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.881885 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlvbk" event={"ID":"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8","Type":"ContainerDied","Data":"ef877dfe87c9b17c27387bc59b869068bf1fd5496a499fbb12b09e82056e5efb"}
Nov 28 17:46:07 crc kubenswrapper[4909]: I1128 17:46:07.882011 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlvbk" event={"ID":"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8","Type":"ContainerStarted","Data":"974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9"}
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.453022 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.460487 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.581218 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hqgq\" (UniqueName: \"kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq\") pod \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") "
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.581363 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") pod \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") "
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.581414 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") pod \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\" (UID: \"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8\") "
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.581522 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts\") pod \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\" (UID: \"c13ce55f-0d0a-4e2f-adcc-98b7829703a4\") "
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.582218 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" (UID: "ed18cc94-ace5-4bf8-a055-dd864ec5e5c8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.582273 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c13ce55f-0d0a-4e2f-adcc-98b7829703a4" (UID: "c13ce55f-0d0a-4e2f-adcc-98b7829703a4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.590114 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv" (OuterVolumeSpecName: "kube-api-access-cqzpv") pod "ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" (UID: "ed18cc94-ace5-4bf8-a055-dd864ec5e5c8"). InnerVolumeSpecName "kube-api-access-cqzpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.592103 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq" (OuterVolumeSpecName: "kube-api-access-7hqgq") pod "c13ce55f-0d0a-4e2f-adcc-98b7829703a4" (UID: "c13ce55f-0d0a-4e2f-adcc-98b7829703a4"). InnerVolumeSpecName "kube-api-access-7hqgq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.683217 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hqgq\" (UniqueName: \"kubernetes.io/projected/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-kube-api-access-7hqgq\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.683260 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.683274 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqzpv\" (UniqueName: \"kubernetes.io/projected/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8-kube-api-access-cqzpv\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.683287 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c13ce55f-0d0a-4e2f-adcc-98b7829703a4-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.907159 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4ece-account-create-update-8gc9p"
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.909295 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vlvbk"
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.915534 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4ece-account-create-update-8gc9p" event={"ID":"c13ce55f-0d0a-4e2f-adcc-98b7829703a4","Type":"ContainerDied","Data":"000aeb39a93609eeefe04072ee23a8be16ab1cff8bca1ed64d13938aa5d1c402"}
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.915592 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="000aeb39a93609eeefe04072ee23a8be16ab1cff8bca1ed64d13938aa5d1c402"
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.915608 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vlvbk" event={"ID":"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8","Type":"ContainerDied","Data":"974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9"}
Nov 28 17:46:09 crc kubenswrapper[4909]: I1128 17:46:09.915624 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="974558d9767ed3a0393dc50f0a6534382a2b0b7c19fb58de5d387828b46ec7b9"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.919222 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-6m8rl"]
Nov 28 17:46:11 crc kubenswrapper[4909]: E1128 17:46:11.920123 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c13ce55f-0d0a-4e2f-adcc-98b7829703a4" containerName="mariadb-account-create-update"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.920139 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c13ce55f-0d0a-4e2f-adcc-98b7829703a4" containerName="mariadb-account-create-update"
Nov 28 17:46:11 crc kubenswrapper[4909]: E1128 17:46:11.920166 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" containerName="mariadb-database-create"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.920174 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" containerName="mariadb-database-create"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.920420 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" containerName="mariadb-database-create"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.920434 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c13ce55f-0d0a-4e2f-adcc-98b7829703a4" containerName="mariadb-account-create-update"
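
When the next pod is admitted, the CPU and memory managers first drop state left behind by containers that no longer exist, which is what the RemoveStaleState and "Deleted CPUSet assignment" entries above record. A toy sketch of that bookkeeping, with a plain map standing in for the kubelet's checkpointed state:

```go
// Sketch of stale-state cleanup: any per-container assignment whose pod
// is no longer active gets dropped before new assignments are made.
package main

import "fmt"

type key struct{ podUID, container string }

func removeStaleState(assignments map[key]string, livePods map[string]bool) {
	for k := range assignments {
		if !livePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(assignments, k) // the "Deleted CPUSet assignment" step
		}
	}
}

func main() {
	// Pod UIDs and container names taken from the entries above; the
	// cpuset values are invented for the example.
	assignments := map[key]string{
		{"c13ce55f-0d0a-4e2f-adcc-98b7829703a4", "mariadb-account-create-update"}: "cpus 0-1",
		{"ed18cc94-ace5-4bf8-a055-dd864ec5e5c8", "mariadb-database-create"}:       "cpus 2-3",
	}
	removeStaleState(assignments, map[string]bool{}) // neither pod is live any more
}
```
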
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.921203 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.925096 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-dbj7x"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.925117 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.925803 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 28 17:46:11 crc kubenswrapper[4909]: I1128 17:46:11.929218 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-6m8rl"]
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023436 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023533 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023562 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023591 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023628 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.023754 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx6nh\" (UniqueName: \"kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.124984 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx6nh\" (UniqueName: \"kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125092 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125144 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125165 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125185 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125211 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.125210 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.138846 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.151394 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx6nh\" (UniqueName: \"kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.153538 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.160375 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.165541 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts\") pod \"cinder-db-sync-6m8rl\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") " pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.251866 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.745703 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-6m8rl"]
Nov 28 17:46:12 crc kubenswrapper[4909]: W1128 17:46:12.755198 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdcedded_3fc7_4242_a4b8_c34f0560daef.slice/crio-e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c WatchSource:0}: Error finding container e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c: Status 404 returned error can't find the container with id e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c
Nov 28 17:46:12 crc kubenswrapper[4909]: I1128 17:46:12.946177 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6m8rl" event={"ID":"bdcedded-3fc7-4242-a4b8-c34f0560daef","Type":"ContainerStarted","Data":"e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c"}
Nov 28 17:46:13 crc kubenswrapper[4909]: I1128 17:46:13.958588 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6m8rl" event={"ID":"bdcedded-3fc7-4242-a4b8-c34f0560daef","Type":"ContainerStarted","Data":"d6e386292cc7e130a2615622530583498f2425035b61655d00b32ecc1a026b0d"}
Nov 28 17:46:13 crc kubenswrapper[4909]: I1128 17:46:13.983095 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-6m8rl" podStartSLOduration=2.98307628 podStartE2EDuration="2.98307628s" podCreationTimestamp="2025-11-28 17:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:13.976714119 +0000 UTC m=+5756.373398653" watchObservedRunningTime="2025-11-28 17:46:13.98307628 +0000 UTC m=+5756.379760804"
Nov 28 17:46:15 crc kubenswrapper[4909]: I1128 17:46:15.977210 4909 generic.go:334] "Generic (PLEG): container finished" podID="bdcedded-3fc7-4242-a4b8-c34f0560daef" containerID="d6e386292cc7e130a2615622530583498f2425035b61655d00b32ecc1a026b0d" exitCode=0
Nov 28 17:46:15 crc kubenswrapper[4909]: I1128 17:46:15.977299 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6m8rl" event={"ID":"bdcedded-3fc7-4242-a4b8-c34f0560daef","Type":"ContainerDied","Data":"d6e386292cc7e130a2615622530583498f2425035b61655d00b32ecc1a026b0d"}
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.374111 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520100 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520297 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520359 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx6nh\" (UniqueName: \"kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520418 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520435 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520486 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.520565 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts\") pod \"bdcedded-3fc7-4242-a4b8-c34f0560daef\" (UID: \"bdcedded-3fc7-4242-a4b8-c34f0560daef\") "
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.521198 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bdcedded-3fc7-4242-a4b8-c34f0560daef-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.528303 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts" (OuterVolumeSpecName: "scripts") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.528359 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.529938 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh" (OuterVolumeSpecName: "kube-api-access-bx6nh") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "kube-api-access-bx6nh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.546442 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.622988 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.623027 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.623042 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx6nh\" (UniqueName: \"kubernetes.io/projected/bdcedded-3fc7-4242-a4b8-c34f0560daef-kube-api-access-bx6nh\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.623059 4909 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.628009 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data" (OuterVolumeSpecName: "config-data") pod "bdcedded-3fc7-4242-a4b8-c34f0560daef" (UID: "bdcedded-3fc7-4242-a4b8-c34f0560daef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:17 crc kubenswrapper[4909]: I1128 17:46:17.725222 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdcedded-3fc7-4242-a4b8-c34f0560daef-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.000864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6m8rl" event={"ID":"bdcedded-3fc7-4242-a4b8-c34f0560daef","Type":"ContainerDied","Data":"e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c"}
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.001290 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4552646edfffb27d17f380839b6d20aa0473abf2ff244117c4708bc0b71751c"
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.000912 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6m8rl"
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.327219 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"]
Nov 28 17:46:18 crc kubenswrapper[4909]: E1128 17:46:18.327698 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdcedded-3fc7-4242-a4b8-c34f0560daef" containerName="cinder-db-sync"
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.327720 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdcedded-3fc7-4242-a4b8-c34f0560daef" containerName="cinder-db-sync"
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.327966 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdcedded-3fc7-4242-a4b8-c34f0560daef" containerName="cinder-db-sync"
Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.329259 4909 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.416560 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"] Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.447560 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhtrq\" (UniqueName: \"kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.447724 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.448158 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.448343 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.448441 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.538507 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.540307 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.545430 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.545619 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.545799 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.549689 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.549846 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.549962 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.550098 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhtrq\" (UniqueName: \"kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.550229 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.551345 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.552119 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.552143 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-dbj7x" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.552541 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.552625 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.560479 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.580651 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhtrq\" (UniqueName: \"kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq\") pod \"dnsmasq-dns-5bcf776997-bfqmp\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658551 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658619 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658643 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658736 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658890 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.658937 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xh6m\" (UniqueName: \"kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.659120 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.663081 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.761820 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762241 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762287 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762310 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762361 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762416 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.762439 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xh6m\" (UniqueName: \"kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.763366 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.764969 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc 
kubenswrapper[4909]: I1128 17:46:18.769799 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.770156 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.770316 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.776102 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.791039 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xh6m\" (UniqueName: \"kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m\") pod \"cinder-api-0\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " pod="openstack/cinder-api-0" Nov 28 17:46:18 crc kubenswrapper[4909]: I1128 17:46:18.867502 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:19 crc kubenswrapper[4909]: I1128 17:46:19.147087 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"] Nov 28 17:46:19 crc kubenswrapper[4909]: W1128 17:46:19.169110 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45872bd0_84d2_43ff_92dc_86de32a67a64.slice/crio-2ac729cea6c6b25ff586da491d105ad3331ac009e06d1784b930a434c19eab95 WatchSource:0}: Error finding container 2ac729cea6c6b25ff586da491d105ad3331ac009e06d1784b930a434c19eab95: Status 404 returned error can't find the container with id 2ac729cea6c6b25ff586da491d105ad3331ac009e06d1784b930a434c19eab95 Nov 28 17:46:19 crc kubenswrapper[4909]: I1128 17:46:19.402795 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:19 crc kubenswrapper[4909]: W1128 17:46:19.465774 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a6ccec4_c48a_4d29_ad8e_cb132e181d86.slice/crio-8b18917785a5e6272d19f857f92ff4d21cd67e55f7be2ff4dab20b2b4c9a5903 WatchSource:0}: Error finding container 8b18917785a5e6272d19f857f92ff4d21cd67e55f7be2ff4dab20b2b4c9a5903: Status 404 returned error can't find the container with id 8b18917785a5e6272d19f857f92ff4d21cd67e55f7be2ff4dab20b2b4c9a5903 Nov 28 17:46:19 crc kubenswrapper[4909]: I1128 17:46:19.912995 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:46:19 crc kubenswrapper[4909]: I1128 17:46:19.913386 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:46:20 crc kubenswrapper[4909]: I1128 17:46:20.025484 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerStarted","Data":"8b18917785a5e6272d19f857f92ff4d21cd67e55f7be2ff4dab20b2b4c9a5903"} Nov 28 17:46:20 crc kubenswrapper[4909]: I1128 17:46:20.028033 4909 generic.go:334] "Generic (PLEG): container finished" podID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerID="358757d9a7a0cfd070b1faaf6d1987193c8f32e8b14620eb01c08eb5dcb3f680" exitCode=0 Nov 28 17:46:20 crc kubenswrapper[4909]: I1128 17:46:20.028070 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" event={"ID":"45872bd0-84d2-43ff-92dc-86de32a67a64","Type":"ContainerDied","Data":"358757d9a7a0cfd070b1faaf6d1987193c8f32e8b14620eb01c08eb5dcb3f680"} Nov 28 17:46:20 crc kubenswrapper[4909]: I1128 17:46:20.028090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" event={"ID":"45872bd0-84d2-43ff-92dc-86de32a67a64","Type":"ContainerStarted","Data":"2ac729cea6c6b25ff586da491d105ad3331ac009e06d1784b930a434c19eab95"} Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.037478 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerStarted","Data":"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a"} Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.037781 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerStarted","Data":"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b"} Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.039000 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.041399 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" event={"ID":"45872bd0-84d2-43ff-92dc-86de32a67a64","Type":"ContainerStarted","Data":"f0b18132ed8a6bffb6ef9d4d938f8473a5f0e34c997df09948a44ba46f7c58c1"} Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.041934 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.056487 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.056458603 podStartE2EDuration="3.056458603s" podCreationTimestamp="2025-11-28 17:46:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:21.055822695 +0000 UTC m=+5763.452507219" watchObservedRunningTime="2025-11-28 17:46:21.056458603 +0000 UTC m=+5763.453143137" Nov 28 17:46:21 crc kubenswrapper[4909]: I1128 17:46:21.079189 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" podStartSLOduration=3.079169222 podStartE2EDuration="3.079169222s" podCreationTimestamp="2025-11-28 17:46:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:21.076752587 +0000 UTC m=+5763.473437111" watchObservedRunningTime="2025-11-28 17:46:21.079169222 +0000 UTC m=+5763.475853756" Nov 28 17:46:28 crc kubenswrapper[4909]: I1128 17:46:28.664761 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:46:28 crc kubenswrapper[4909]: I1128 17:46:28.755168 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"] Nov 28 17:46:28 crc kubenswrapper[4909]: I1128 17:46:28.755413 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="dnsmasq-dns" containerID="cri-o://a29c9a1d1677f1d575ccc20983a118c3ee21826435a95d9fc205818e6e15ffbb" gracePeriod=10 Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.120627 4909 generic.go:334] "Generic (PLEG): container finished" podID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerID="a29c9a1d1677f1d575ccc20983a118c3ee21826435a95d9fc205818e6e15ffbb" exitCode=0 Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.120928 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" event={"ID":"b8edd8a1-86bd-4299-8585-d36cabfbf0a6","Type":"ContainerDied","Data":"a29c9a1d1677f1d575ccc20983a118c3ee21826435a95d9fc205818e6e15ffbb"} Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 
17:46:29.310885 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.486535 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc\") pod \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.486605 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whw5z\" (UniqueName: \"kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z\") pod \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.486665 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config\") pod \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.486688 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb\") pod \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.486772 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb\") pod \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\" (UID: \"b8edd8a1-86bd-4299-8585-d36cabfbf0a6\") " Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.525732 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z" (OuterVolumeSpecName: "kube-api-access-whw5z") pod "b8edd8a1-86bd-4299-8585-d36cabfbf0a6" (UID: "b8edd8a1-86bd-4299-8585-d36cabfbf0a6"). InnerVolumeSpecName "kube-api-access-whw5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.560869 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b8edd8a1-86bd-4299-8585-d36cabfbf0a6" (UID: "b8edd8a1-86bd-4299-8585-d36cabfbf0a6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.563356 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config" (OuterVolumeSpecName: "config") pod "b8edd8a1-86bd-4299-8585-d36cabfbf0a6" (UID: "b8edd8a1-86bd-4299-8585-d36cabfbf0a6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.564171 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b8edd8a1-86bd-4299-8585-d36cabfbf0a6" (UID: "b8edd8a1-86bd-4299-8585-d36cabfbf0a6"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.588633 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.588688 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whw5z\" (UniqueName: \"kubernetes.io/projected/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-kube-api-access-whw5z\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.588700 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.588709 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.596792 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b8edd8a1-86bd-4299-8585-d36cabfbf0a6" (UID: "b8edd8a1-86bd-4299-8585-d36cabfbf0a6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:46:29 crc kubenswrapper[4909]: I1128 17:46:29.689887 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8edd8a1-86bd-4299-8585-d36cabfbf0a6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.130649 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" event={"ID":"b8edd8a1-86bd-4299-8585-d36cabfbf0a6","Type":"ContainerDied","Data":"9cbb9f187d371ff89702e2f90be80c8a6de47f6e11fa141175cacae30a95b047"} Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.131022 4909 scope.go:117] "RemoveContainer" containerID="a29c9a1d1677f1d575ccc20983a118c3ee21826435a95d9fc205818e6e15ffbb" Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.130732 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55cd5466f5-dkfbz" Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.161405 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.189815 4909 scope.go:117] "RemoveContainer" containerID="13fca75e8769f3a78135339df5c858acca273e77867c355c34dbcec6d659d34d" Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.245680 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55cd5466f5-dkfbz"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.273731 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.273990 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerName="nova-cell0-conductor-conductor" containerID="cri-o://c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.297291 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.297512 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" containerID="cri-o://568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.297816 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" containerID="cri-o://ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.310316 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.310823 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler" containerID="cri-o://0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.320721 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.320989 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="eec53938-b992-40c0-8b4d-3109fa149936" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.330187 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 17:46:30.330394 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" containerID="cri-o://8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0" gracePeriod=30 Nov 28 17:46:30 crc kubenswrapper[4909]: I1128 
17:46:30.330772 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" containerID="cri-o://3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318" gracePeriod=30 Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.041950 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.103526 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.142858 4909 generic.go:334] "Generic (PLEG): container finished" podID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerID="8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0" exitCode=143 Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.142921 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerDied","Data":"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0"} Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.146597 4909 generic.go:334] "Generic (PLEG): container finished" podID="eec53938-b992-40c0-8b4d-3109fa149936" containerID="b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd" exitCode=0 Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.146731 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.146903 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eec53938-b992-40c0-8b4d-3109fa149936","Type":"ContainerDied","Data":"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd"} Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.146960 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eec53938-b992-40c0-8b4d-3109fa149936","Type":"ContainerDied","Data":"7b74307b23a827ccdf59bab61133b6340c7a9854c1b8ae1cb44e70f21f980489"} Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.146977 4909 scope.go:117] "RemoveContainer" containerID="b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.151935 4909 generic.go:334] "Generic (PLEG): container finished" podID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerID="568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0" exitCode=143 Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.151979 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerDied","Data":"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0"} Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.176161 4909 scope.go:117] "RemoveContainer" containerID="b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd" Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.177002 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd\": container with ID starting with b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd not found: ID does not exist" 
containerID="b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.177050 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd"} err="failed to get container status \"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd\": rpc error: code = NotFound desc = could not find container \"b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd\": container with ID starting with b69f9ff2964098d35a2fd7dead293efdda84f4d9c656cac98ecc53294b5c8ebd not found: ID does not exist" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.232168 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data\") pod \"eec53938-b992-40c0-8b4d-3109fa149936\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.232268 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle\") pod \"eec53938-b992-40c0-8b4d-3109fa149936\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.232516 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwjb5\" (UniqueName: \"kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5\") pod \"eec53938-b992-40c0-8b4d-3109fa149936\" (UID: \"eec53938-b992-40c0-8b4d-3109fa149936\") " Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.238899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5" (OuterVolumeSpecName: "kube-api-access-gwjb5") pod "eec53938-b992-40c0-8b4d-3109fa149936" (UID: "eec53938-b992-40c0-8b4d-3109fa149936"). InnerVolumeSpecName "kube-api-access-gwjb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.259670 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eec53938-b992-40c0-8b4d-3109fa149936" (UID: "eec53938-b992-40c0-8b4d-3109fa149936"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.271276 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data" (OuterVolumeSpecName: "config-data") pod "eec53938-b992-40c0-8b4d-3109fa149936" (UID: "eec53938-b992-40c0-8b4d-3109fa149936"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.335300 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.335334 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwjb5\" (UniqueName: \"kubernetes.io/projected/eec53938-b992-40c0-8b4d-3109fa149936-kube-api-access-gwjb5\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.335346 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec53938-b992-40c0-8b4d-3109fa149936-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.479197 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.490039 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.499507 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.499905 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="init" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.499923 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="init" Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.499948 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eec53938-b992-40c0-8b4d-3109fa149936" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.499954 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="eec53938-b992-40c0-8b4d-3109fa149936" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.499969 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="dnsmasq-dns" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.499975 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="dnsmasq-dns" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.500554 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="eec53938-b992-40c0-8b4d-3109fa149936" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.500573 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" containerName="dnsmasq-dns" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.501154 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.504687 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.516260 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.640063 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.640150 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/d8441066-6062-409c-a31e-9009886f1104-kube-api-access-r86pz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.640213 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.741899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.741973 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/d8441066-6062-409c-a31e-9009886f1104-kube-api-access-r86pz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.742039 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.747533 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.748499 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8441066-6062-409c-a31e-9009886f1104-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.761189 
4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/d8441066-6062-409c-a31e-9009886f1104-kube-api-access-r86pz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d8441066-6062-409c-a31e-9009886f1104\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.819444 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.827632 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.829386 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.831002 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:31 crc kubenswrapper[4909]: E1128 17:46:31.831049 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.931579 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8edd8a1-86bd-4299-8585-d36cabfbf0a6" path="/var/lib/kubelet/pods/b8edd8a1-86bd-4299-8585-d36cabfbf0a6/volumes" Nov 28 17:46:31 crc kubenswrapper[4909]: I1128 17:46:31.933145 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eec53938-b992-40c0-8b4d-3109fa149936" path="/var/lib/kubelet/pods/eec53938-b992-40c0-8b4d-3109fa149936/volumes" Nov 28 17:46:32 crc kubenswrapper[4909]: I1128 17:46:32.295331 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 17:46:32 crc kubenswrapper[4909]: W1128 17:46:32.303359 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8441066_6062_409c_a31e_9009886f1104.slice/crio-4ec5e6d7a3594461fcb7b28655b48396fd4d442b63f57b5ce04a8cd8d9f56215 WatchSource:0}: Error finding container 4ec5e6d7a3594461fcb7b28655b48396fd4d442b63f57b5ce04a8cd8d9f56215: Status 404 returned error can't find the container with id 4ec5e6d7a3594461fcb7b28655b48396fd4d442b63f57b5ce04a8cd8d9f56215 Nov 28 17:46:33 crc kubenswrapper[4909]: E1128 17:46:33.100750 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 17:46:33 crc kubenswrapper[4909]: E1128 17:46:33.102986 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 17:46:33 crc kubenswrapper[4909]: E1128 17:46:33.104530 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 17:46:33 crc kubenswrapper[4909]: E1128 17:46:33.104584 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerName="nova-cell0-conductor-conductor" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.180938 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d8441066-6062-409c-a31e-9009886f1104","Type":"ContainerStarted","Data":"636833a3da6df7385d343f23509d2d3a6975fa1dd4e1b7ba6a7b0b64a33d4ff6"} Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.180993 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d8441066-6062-409c-a31e-9009886f1104","Type":"ContainerStarted","Data":"4ec5e6d7a3594461fcb7b28655b48396fd4d442b63f57b5ce04a8cd8d9f56215"} Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.222849 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.222826052 podStartE2EDuration="2.222826052s" podCreationTimestamp="2025-11-28 17:46:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:33.207291545 +0000 UTC m=+5775.603976079" watchObservedRunningTime="2025-11-28 17:46:33.222826052 +0000 UTC m=+5775.619510576" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.480839 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.74:8774/\": read tcp 10.217.0.2:54418->10.217.1.74:8774: read: connection reset by peer" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.480877 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.74:8774/\": read tcp 10.217.0.2:54412->10.217.1.74:8774: read: connection reset by peer" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.501871 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.72:8775/\": read tcp 
10.217.0.2:51740->10.217.1.72:8775: read: connection reset by peer" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.502365 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.72:8775/\": read tcp 10.217.0.2:51738->10.217.1.72:8775: read: connection reset by peer" Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.567203 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:33 crc kubenswrapper[4909]: I1128 17:46:33.567404 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" containerName="nova-cell1-conductor-conductor" containerID="cri-o://e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104" gracePeriod=30 Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.022220 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.029377 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.192010 4909 generic.go:334] "Generic (PLEG): container finished" podID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerID="ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8" exitCode=0 Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.192057 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.192075 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerDied","Data":"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8"} Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.192497 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f64ea2a3-228b-4b81-a969-4ee779d2d101","Type":"ContainerDied","Data":"31c32b751b99e761b1c36d8a0ce02ef1a30466818b13d367d656f78ab4d62cac"} Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.192557 4909 scope.go:117] "RemoveContainer" containerID="ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.194706 4909 generic.go:334] "Generic (PLEG): container finished" podID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerID="3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318" exitCode=0 Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.194816 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.194913 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerDied","Data":"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318"} Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.195032 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8","Type":"ContainerDied","Data":"e4fe20d9034181485f10a28fda1555d95bdaec65bbe1199bc1fb0465af36932f"} Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212149 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data\") pod \"f64ea2a3-228b-4b81-a969-4ee779d2d101\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212207 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjgs6\" (UniqueName: \"kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6\") pod \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212353 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle\") pod \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212384 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkqs9\" (UniqueName: \"kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9\") pod \"f64ea2a3-228b-4b81-a969-4ee779d2d101\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs\") pod \"f64ea2a3-228b-4b81-a969-4ee779d2d101\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212455 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle\") pod \"f64ea2a3-228b-4b81-a969-4ee779d2d101\" (UID: \"f64ea2a3-228b-4b81-a969-4ee779d2d101\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212531 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data\") pod \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.212551 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs\") pod \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\" (UID: \"2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8\") " Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.214285 4909 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs" (OuterVolumeSpecName: "logs") pod "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" (UID: "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.214815 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs" (OuterVolumeSpecName: "logs") pod "f64ea2a3-228b-4b81-a969-4ee779d2d101" (UID: "f64ea2a3-228b-4b81-a969-4ee779d2d101"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.222817 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6" (OuterVolumeSpecName: "kube-api-access-tjgs6") pod "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" (UID: "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8"). InnerVolumeSpecName "kube-api-access-tjgs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.223062 4909 scope.go:117] "RemoveContainer" containerID="568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.233832 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9" (OuterVolumeSpecName: "kube-api-access-lkqs9") pod "f64ea2a3-228b-4b81-a969-4ee779d2d101" (UID: "f64ea2a3-228b-4b81-a969-4ee779d2d101"). InnerVolumeSpecName "kube-api-access-lkqs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.244858 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" (UID: "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.251975 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data" (OuterVolumeSpecName: "config-data") pod "f64ea2a3-228b-4b81-a969-4ee779d2d101" (UID: "f64ea2a3-228b-4b81-a969-4ee779d2d101"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.259416 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f64ea2a3-228b-4b81-a969-4ee779d2d101" (UID: "f64ea2a3-228b-4b81-a969-4ee779d2d101"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.274698 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data" (OuterVolumeSpecName: "config-data") pod "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" (UID: "2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.276332 4909 scope.go:117] "RemoveContainer" containerID="ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.276826 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8\": container with ID starting with ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8 not found: ID does not exist" containerID="ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.276864 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8"} err="failed to get container status \"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8\": rpc error: code = NotFound desc = could not find container \"ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8\": container with ID starting with ad836900de66d3e644697f1c55555f68f0092b849060f19aa4cbb392067437d8 not found: ID does not exist" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.276890 4909 scope.go:117] "RemoveContainer" containerID="568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.277183 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0\": container with ID starting with 568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0 not found: ID does not exist" containerID="568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.277329 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0"} err="failed to get container status \"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0\": rpc error: code = NotFound desc = could not find container \"568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0\": container with ID starting with 568645e165dd3d242a258c3ef8d737831758332d69789fc0e4ec1f70e853dbf0 not found: ID does not exist" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.277422 4909 scope.go:117] "RemoveContainer" containerID="3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.294215 4909 scope.go:117] "RemoveContainer" containerID="8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.314811 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315055 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkqs9\" (UniqueName: \"kubernetes.io/projected/f64ea2a3-228b-4b81-a969-4ee779d2d101-kube-api-access-lkqs9\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315142 4909 reconciler_common.go:293] "Volume detached for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f64ea2a3-228b-4b81-a969-4ee779d2d101-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315251 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315327 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315389 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315450 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f64ea2a3-228b-4b81-a969-4ee779d2d101-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.315520 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjgs6\" (UniqueName: \"kubernetes.io/projected/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8-kube-api-access-tjgs6\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.319573 4909 scope.go:117] "RemoveContainer" containerID="3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.320182 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318\": container with ID starting with 3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318 not found: ID does not exist" containerID="3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.320281 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318"} err="failed to get container status \"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318\": rpc error: code = NotFound desc = could not find container \"3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318\": container with ID starting with 3943dbfbce0f1e8ad73faefa427c93d2067e8e7e08a7c12686097b5532f76318 not found: ID does not exist" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.320361 4909 scope.go:117] "RemoveContainer" containerID="8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.320793 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0\": container with ID starting with 8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0 not found: ID does not exist" containerID="8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.320843 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0"} err="failed to get container status \"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0\": rpc error: code = NotFound desc = could not find container \"8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0\": container with ID starting with 8acb6f15942b98cab5e60d236c923b7c5432f28b3ad0365acc150216894182e0 not found: ID does not exist" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.549528 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.561584 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.579847 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.595160 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.607838 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.608400 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.608681 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.608757 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.608805 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.608916 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.608971 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" Nov 28 17:46:34 crc kubenswrapper[4909]: E1128 17:46:34.609026 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.609080 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.609281 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-log" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.609345 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" containerName="nova-metadata-metadata" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.609405 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-log" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.609455 4909 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" containerName="nova-api-api" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.610512 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.621493 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.621738 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m2kq\" (UniqueName: \"kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.621923 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.622034 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.625704 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.639107 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.641126 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.642501 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.643388 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.662981 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.723886 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.723940 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.723967 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz6mg\" (UniqueName: \"kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724013 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724043 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m2kq\" (UniqueName: \"kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724137 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724215 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.724444 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.740473 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.740552 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.747310 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m2kq\" (UniqueName: \"kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq\") pod \"nova-api-0\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.825837 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.826268 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.826341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz6mg\" (UniqueName: \"kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.826355 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.826371 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.829929 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.831186 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.844481 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz6mg\" (UniqueName: \"kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg\") pod \"nova-metadata-0\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " pod="openstack/nova-metadata-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.971420 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 17:46:34 crc kubenswrapper[4909]: I1128 17:46:34.982754 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.436687 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.524277 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.798087 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rqmg5"] Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.800139 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.840424 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rqmg5"] Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.844174 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.844255 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb46f\" (UniqueName: \"kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.844323 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.925762 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8" path="/var/lib/kubelet/pods/2b676cf3-d4d3-4e1d-9ce3-eb7b4c871bd8/volumes" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.926790 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f64ea2a3-228b-4b81-a969-4ee779d2d101" path="/var/lib/kubelet/pods/f64ea2a3-228b-4b81-a969-4ee779d2d101/volumes" Nov 28 17:46:35 crc 
kubenswrapper[4909]: I1128 17:46:35.945463 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.945579 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.945684 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb46f\" (UniqueName: \"kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.946111 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.946245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:35 crc kubenswrapper[4909]: I1128 17:46:35.971009 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb46f\" (UniqueName: \"kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f\") pod \"community-operators-rqmg5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") " pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.210574 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rqmg5" Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.272719 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerStarted","Data":"3011ae936e046f930a87e8382a2115d20b5fac41b3278ba9b19928abacbf0d1f"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.273039 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerStarted","Data":"3916421bf5eb1a4dd64705dc52aba217e0dd0f8adcc8ed2960649da6026f1340"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.273055 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerStarted","Data":"c062d938c08070aaca44ee54b2bd4baeb9260c894aceb394663354832094b263"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.284451 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerStarted","Data":"9116defd9fe70722ad03cad59ee3e52631b206c84226f2c2088a926031e0b38c"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.284526 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerStarted","Data":"cf563c6f57ef2c67b0f416db42c68c8a696cf8b010ccf61af5cd491c594466f4"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.284559 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerStarted","Data":"6341298ee59f56a90dcb4b2f9b687fd85b6beef35bdb7c775b09dd7594ccba7b"} Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.301696 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.301677974 podStartE2EDuration="2.301677974s" podCreationTimestamp="2025-11-28 17:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:36.298428067 +0000 UTC m=+5778.695112591" watchObservedRunningTime="2025-11-28 17:46:36.301677974 +0000 UTC m=+5778.698362498" Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.343133 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.343110446 podStartE2EDuration="2.343110446s" podCreationTimestamp="2025-11-28 17:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:36.329376327 +0000 UTC m=+5778.726060861" watchObservedRunningTime="2025-11-28 17:46:36.343110446 +0000 UTC m=+5778.739794970" Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.819180 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rqmg5"] Nov 28 17:46:36 crc kubenswrapper[4909]: I1128 17:46:36.819598 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 17:46:36 crc kubenswrapper[4909]: E1128 17:46:36.827123 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is 
stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:36 crc kubenswrapper[4909]: E1128 17:46:36.828513 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:36 crc kubenswrapper[4909]: W1128 17:46:36.828538 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b847096_da3c_4b9c_883a_319681d9ebe5.slice/crio-cf16e25b6412cc610f56b8208732a5868866b5a472e0c4f0115df857adde631a WatchSource:0}: Error finding container cf16e25b6412cc610f56b8208732a5868866b5a472e0c4f0115df857adde631a: Status 404 returned error can't find the container with id cf16e25b6412cc610f56b8208732a5868866b5a472e0c4f0115df857adde631a Nov 28 17:46:36 crc kubenswrapper[4909]: E1128 17:46:36.829535 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 17:46:36 crc kubenswrapper[4909]: E1128 17:46:36.829588 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.030061 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.173725 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data\") pod \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.173842 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km74v\" (UniqueName: \"kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v\") pod \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.173907 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle\") pod \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\" (UID: \"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.180194 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v" (OuterVolumeSpecName: "kube-api-access-km74v") pod "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" (UID: "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c"). InnerVolumeSpecName "kube-api-access-km74v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.210726 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" (UID: "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.218275 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data" (OuterVolumeSpecName: "config-data") pod "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" (UID: "e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.275377 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.275411 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.275423 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km74v\" (UniqueName: \"kubernetes.io/projected/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c-kube-api-access-km74v\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.292964 4909 generic.go:334] "Generic (PLEG): container finished" podID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerID="c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" exitCode=0 Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.293038 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18","Type":"ContainerDied","Data":"c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798"} Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.294358 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerID="ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a" exitCode=0 Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.294400 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerDied","Data":"ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a"} Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.294466 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerStarted","Data":"cf16e25b6412cc610f56b8208732a5868866b5a472e0c4f0115df857adde631a"} Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.295847 4909 generic.go:334] "Generic (PLEG): container finished" podID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" containerID="e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104" exitCode=0 Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.295878 4909 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.295944 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c","Type":"ContainerDied","Data":"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104"} Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.295972 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c","Type":"ContainerDied","Data":"8fa86837a11b326907fb332381dea7418ba32038e08b04c9a50144ffb3507b72"} Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.295992 4909 scope.go:117] "RemoveContainer" containerID="e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.296976 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.340807 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.343990 4909 scope.go:117] "RemoveContainer" containerID="e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104" Nov 28 17:46:37 crc kubenswrapper[4909]: E1128 17:46:37.346950 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104\": container with ID starting with e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104 not found: ID does not exist" containerID="e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.346996 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104"} err="failed to get container status \"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104\": rpc error: code = NotFound desc = could not find container \"e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104\": container with ID starting with e0f5e34fc2ba428776c3da47b5aedd7c3e29b80fbb3e4a59b085379fd4ee0104 not found: ID does not exist" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.352173 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.363143 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.363584 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:37 crc kubenswrapper[4909]: E1128 17:46:37.364004 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" containerName="nova-cell1-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.364022 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" containerName="nova-cell1-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: E1128 17:46:37.364055 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerName="nova-cell0-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.364061 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerName="nova-cell0-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.364240 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" containerName="nova-cell0-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.364259 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" containerName="nova-cell1-conductor-conductor" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.365199 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.367489 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.373198 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.478606 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7szs\" (UniqueName: \"kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs\") pod \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.478979 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle\") pod \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.479142 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data\") pod \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\" (UID: \"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18\") " Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.479589 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 
17:46:37.480680 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srgmg\" (UniqueName: \"kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.480810 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.489876 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs" (OuterVolumeSpecName: "kube-api-access-t7szs") pod "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" (UID: "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18"). InnerVolumeSpecName "kube-api-access-t7szs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.511233 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" (UID: "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.555276 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data" (OuterVolumeSpecName: "config-data") pod "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" (UID: "8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.594038 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.597066 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srgmg\" (UniqueName: \"kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.597474 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.598906 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7szs\" (UniqueName: \"kubernetes.io/projected/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-kube-api-access-t7szs\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.599004 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.599068 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.605981 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.607489 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.634885 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srgmg\" (UniqueName: \"kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg\") pod \"nova-cell1-conductor-0\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.689213 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 17:46:37 crc kubenswrapper[4909]: I1128 17:46:37.917912 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c" path="/var/lib/kubelet/pods/e8fc342f-8d41-430e-8b7c-94ab6dd5fa5c/volumes" Nov 28 17:46:38 crc kubenswrapper[4909]: W1128 17:46:38.152989 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod520d15b5_e2af_446f_8576_3d67355807b9.slice/crio-a6ed951d796899a8ff6652da87d7bca340f9fbdae3f3e257fda85aae79180cf1 WatchSource:0}: Error finding container a6ed951d796899a8ff6652da87d7bca340f9fbdae3f3e257fda85aae79180cf1: Status 404 returned error can't find the container with id a6ed951d796899a8ff6652da87d7bca340f9fbdae3f3e257fda85aae79180cf1 Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.157573 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.305930 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18","Type":"ContainerDied","Data":"3430dc2bb769b29a47b5c7d8365e7a7598fceb37ac4d566449b3e59a8fda0fca"} Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.305967 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.306009 4909 scope.go:117] "RemoveContainer" containerID="c4677026a0b59ac10b561e36691160dc1c12fe09bf6c550e14d353cac0d11798" Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.308751 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"520d15b5-e2af-446f-8576-3d67355807b9","Type":"ContainerStarted","Data":"a6ed951d796899a8ff6652da87d7bca340f9fbdae3f3e257fda85aae79180cf1"} Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.311789 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerStarted","Data":"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"} Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.335377 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.353155 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.374803 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.377057 4909 util.go:30] "No sandbox for pod can be found. 
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.377057 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.389935 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.402764 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.517408 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.517723 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx6pm\" (UniqueName: \"kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.517804 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.619088 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx6pm\" (UniqueName: \"kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.619712 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.619895 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.624851 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.635235 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.638564 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx6pm\" (UniqueName: \"kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm\") pod \"nova-cell0-conductor-0\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:38 crc kubenswrapper[4909]: I1128 17:46:38.793924 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.243040 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 17:46:39 crc kubenswrapper[4909]: W1128 17:46:39.248552 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b8b20b0_03bc_4edf_89ef_1cad623e470e.slice/crio-208bf8958ecb13ba4b5e9918198422fa986f23b894d040111cb869736248a550 WatchSource:0}: Error finding container 208bf8958ecb13ba4b5e9918198422fa986f23b894d040111cb869736248a550: Status 404 returned error can't find the container with id 208bf8958ecb13ba4b5e9918198422fa986f23b894d040111cb869736248a550
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.327454 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7b8b20b0-03bc-4edf-89ef-1cad623e470e","Type":"ContainerStarted","Data":"208bf8958ecb13ba4b5e9918198422fa986f23b894d040111cb869736248a550"}
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.330021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"520d15b5-e2af-446f-8576-3d67355807b9","Type":"ContainerStarted","Data":"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b"}
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.330110 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.332426 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerID="4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad" exitCode=0
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.332458 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerDied","Data":"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"}
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.350570 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.3505511009999998 podStartE2EDuration="2.350551101s" podCreationTimestamp="2025-11-28 17:46:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:39.347078228 +0000 UTC m=+5781.743762762" watchObservedRunningTime="2025-11-28 17:46:39.350551101 +0000 UTC m=+5781.747235635"
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.913583 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18" path="/var/lib/kubelet/pods/8e02d2c0-edc6-43b9-b4a5-ac99b0c66d18/volumes"
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.983642 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 17:46:39 crc kubenswrapper[4909]: I1128 17:46:39.984375 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 17:46:40 crc kubenswrapper[4909]: I1128 17:46:40.342877 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerStarted","Data":"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"}
Nov 28 17:46:40 crc kubenswrapper[4909]: I1128 17:46:40.347811 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7b8b20b0-03bc-4edf-89ef-1cad623e470e","Type":"ContainerStarted","Data":"4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7"}
Nov 28 17:46:40 crc kubenswrapper[4909]: I1128 17:46:40.348142 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:40 crc kubenswrapper[4909]: I1128 17:46:40.368037 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rqmg5" podStartSLOduration=2.78313452 podStartE2EDuration="5.368021643s" podCreationTimestamp="2025-11-28 17:46:35 +0000 UTC" firstStartedPulling="2025-11-28 17:46:37.296413415 +0000 UTC m=+5779.693097949" lastFinishedPulling="2025-11-28 17:46:39.881300558 +0000 UTC m=+5782.277985072" observedRunningTime="2025-11-28 17:46:40.359336519 +0000 UTC m=+5782.756021093" watchObservedRunningTime="2025-11-28 17:46:40.368021643 +0000 UTC m=+5782.764706167"
Nov 28 17:46:40 crc kubenswrapper[4909]: I1128 17:46:40.381105 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.381086513 podStartE2EDuration="2.381086513s" podCreationTimestamp="2025-11-28 17:46:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:40.375605366 +0000 UTC m=+5782.772289900" watchObservedRunningTime="2025-11-28 17:46:40.381086513 +0000 UTC m=+5782.777771067"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.145721 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.272792 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle\") pod \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") "
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.272891 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data\") pod \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") "
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.272928 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh4cb\" (UniqueName: \"kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb\") pod \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\" (UID: \"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad\") "
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.278468 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb" (OuterVolumeSpecName: "kube-api-access-mh4cb") pod "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" (UID: "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad"). InnerVolumeSpecName "kube-api-access-mh4cb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.300885 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data" (OuterVolumeSpecName: "config-data") pod "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" (UID: "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.304115 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" (UID: "917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.371159 4909 generic.go:334] "Generic (PLEG): container finished" podID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5" exitCode=0
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.372492 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.372814 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad","Type":"ContainerDied","Data":"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"}
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.372895 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad","Type":"ContainerDied","Data":"701d0b86ef676cb389e70b9398915868e80da37162a150fb8ae2d4e7f3bb13fe"}
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.372919 4909 scope.go:117] "RemoveContainer" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.388310 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.390543 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.390837 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh4cb\" (UniqueName: \"kubernetes.io/projected/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad-kube-api-access-mh4cb\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.409680 4909 scope.go:117] "RemoveContainer" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"
Nov 28 17:46:41 crc kubenswrapper[4909]: E1128 17:46:41.412151 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5\": container with ID starting with 0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5 not found: ID does not exist" containerID="0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.412287 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5"} err="failed to get container status \"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5\": rpc error: code = NotFound desc = could not find container \"0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5\": container with ID starting with 0dbc180878761ececcd4a47aacf0ed8e00a17b69916a2b1f7a043b02416c15a5 not found: ID does not exist"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.439780 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.462515 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.475380 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:46:41 crc kubenswrapper[4909]: E1128 17:46:41.475900 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.475920 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.476124 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" containerName="nova-scheduler-scheduler"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.476728 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.480737 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.486156 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.496780 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgllc\" (UniqueName: \"kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.496872 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.496951 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.598501 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgllc\" (UniqueName: \"kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.598584 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.598737 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.602123 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.602275 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.615753 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgllc\" (UniqueName: \"kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc\") pod \"nova-scheduler-0\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.807298 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.820500 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.831912 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 17:46:41 crc kubenswrapper[4909]: I1128 17:46:41.919089 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad" path="/var/lib/kubelet/pods/917bd8d7-4c4a-43ae-a1e8-72ea0c6cffad/volumes"
Nov 28 17:46:42 crc kubenswrapper[4909]: I1128 17:46:42.261854 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 17:46:42 crc kubenswrapper[4909]: I1128 17:46:42.383780 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5","Type":"ContainerStarted","Data":"1a5be13676a6b180c1b4dde3e4ecdbba5636b9d3e2ff47b1075335539dd0a852"}
Nov 28 17:46:42 crc kubenswrapper[4909]: I1128 17:46:42.401319 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 17:46:43 crc kubenswrapper[4909]: I1128 17:46:43.392254 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5","Type":"ContainerStarted","Data":"7502ba00a4b47f66f200769bd3bc15236c8b4f7d4b3e60240180cae56fb01d26"}
Nov 28 17:46:43 crc kubenswrapper[4909]: I1128 17:46:43.412938 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.412918313 podStartE2EDuration="2.412918313s" podCreationTimestamp="2025-11-28 17:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:43.409411539 +0000 UTC m=+5785.806096063" watchObservedRunningTime="2025-11-28 17:46:43.412918313 +0000 UTC m=+5785.809602837"
Nov 28 17:46:44 crc kubenswrapper[4909]: I1128 17:46:44.972521 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 17:46:44 crc kubenswrapper[4909]: I1128 17:46:44.972965 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 17:46:44 crc kubenswrapper[4909]: I1128 17:46:44.984561 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 17:46:44 crc kubenswrapper[4909]: I1128 17:46:44.984593 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.054958 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.137907 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.138056 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.137926 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.212625 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.212775 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.282669 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.475569 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.526934 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rqmg5"]
Nov 28 17:46:46 crc kubenswrapper[4909]: I1128 17:46:46.807724 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 17:46:47 crc kubenswrapper[4909]: I1128 17:46:47.731870 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.436273 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.438115 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.440065 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.444271 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rqmg5" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="registry-server" containerID="cri-o://a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762" gracePeriod=2
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.458315 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.525519 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.525587 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bg5tk\" (UniqueName: \"kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.525781 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.525871 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.526044 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.526159 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628236 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628633 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628709 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628774 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628842 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg5tk\" (UniqueName: \"kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628843 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.628875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.636260 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.641276 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.641407 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.652002 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.659245 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg5tk\" (UniqueName: \"kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk\") pod \"cinder-scheduler-0\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.766088 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.830130 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 28 17:46:48 crc kubenswrapper[4909]: I1128 17:46:48.892156 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.036565 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content\") pod \"3b847096-da3c-4b9c-883a-319681d9ebe5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") "
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.036640 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities\") pod \"3b847096-da3c-4b9c-883a-319681d9ebe5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") "
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.036791 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb46f\" (UniqueName: \"kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f\") pod \"3b847096-da3c-4b9c-883a-319681d9ebe5\" (UID: \"3b847096-da3c-4b9c-883a-319681d9ebe5\") "
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.037614 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities" (OuterVolumeSpecName: "utilities") pod "3b847096-da3c-4b9c-883a-319681d9ebe5" (UID: "3b847096-da3c-4b9c-883a-319681d9ebe5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.042266 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f" (OuterVolumeSpecName: "kube-api-access-rb46f") pod "3b847096-da3c-4b9c-883a-319681d9ebe5" (UID: "3b847096-da3c-4b9c-883a-319681d9ebe5"). InnerVolumeSpecName "kube-api-access-rb46f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.090645 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b847096-da3c-4b9c-883a-319681d9ebe5" (UID: "3b847096-da3c-4b9c-883a-319681d9ebe5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.139137 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.139178 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b847096-da3c-4b9c-883a-319681d9ebe5-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.139194 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb46f\" (UniqueName: \"kubernetes.io/projected/3b847096-da3c-4b9c-883a-319681d9ebe5-kube-api-access-rb46f\") on node \"crc\" DevicePath \"\""
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.240452 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.460775 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerStarted","Data":"d00067541a91e894e58032a75f7eaf4f60e4f8d9d14e5ddb0ca32744f6cfd67c"}
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.464703 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerID="a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762" exitCode=0
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.464746 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerDied","Data":"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"}
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.464772 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rqmg5" event={"ID":"3b847096-da3c-4b9c-883a-319681d9ebe5","Type":"ContainerDied","Data":"cf16e25b6412cc610f56b8208732a5868866b5a472e0c4f0115df857adde631a"}
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.464790 4909 scope.go:117] "RemoveContainer" containerID="a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.464794 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rqmg5"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.517322 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rqmg5"]
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.517385 4909 scope.go:117] "RemoveContainer" containerID="4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.529729 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rqmg5"]
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.544446 4909 scope.go:117] "RemoveContainer" containerID="ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.575570 4909 scope.go:117] "RemoveContainer" containerID="a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"
Nov 28 17:46:49 crc kubenswrapper[4909]: E1128 17:46:49.576069 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762\": container with ID starting with a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762 not found: ID does not exist" containerID="a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.576105 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762"} err="failed to get container status \"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762\": rpc error: code = NotFound desc = could not find container \"a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762\": container with ID starting with a72788331a5cc384431b901a96e371944ff79ca8b778b0811a06fc7170e38762 not found: ID does not exist"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.576128 4909 scope.go:117] "RemoveContainer" containerID="4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"
Nov 28 17:46:49 crc kubenswrapper[4909]: E1128 17:46:49.577189 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad\": container with ID starting with 4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad not found: ID does not exist" containerID="4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.577235 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad"} err="failed to get container status \"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad\": rpc error: code = NotFound desc = could not find container \"4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad\": container with ID starting with 4263b8e37b7ea61ae0b425e2b9fed8f9297c62b519c81628ca03390c72bf14ad not found: ID does not exist"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.577256 4909 scope.go:117] "RemoveContainer" containerID="ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a"
Nov 28 17:46:49 crc kubenswrapper[4909]: E1128 17:46:49.577748 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a\": container with ID starting with ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a not found: ID does not exist" containerID="ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.577793 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a"} err="failed to get container status \"ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a\": rpc error: code = NotFound desc = could not find container \"ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a\": container with ID starting with ed87b24a132edb4d03b1f4666f0d9d84d9ae151cc4dc6b125c4eb151b798494a not found: ID does not exist"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.632751 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.632990 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api-log" containerID="cri-o://0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b" gracePeriod=30
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.633053 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api" containerID="cri-o://9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a" gracePeriod=30
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.922449 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.922507 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.929938 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" path="/var/lib/kubelet/pods/3b847096-da3c-4b9c-883a-319681d9ebe5/volumes"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.931018 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.931568 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 17:46:49 crc kubenswrapper[4909]: I1128 17:46:49.931637 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09" gracePeriod=600
Nov 28 17:46:50 crc kubenswrapper[4909]: E1128 17:46:50.020726 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f0ac931_d37b_4342_8c12_c2779b455cc5.slice/crio-conmon-a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.473231 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 28 17:46:50 crc kubenswrapper[4909]: E1128 17:46:50.473915 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="registry-server"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.473939 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="registry-server"
Nov 28 17:46:50 crc kubenswrapper[4909]: E1128 17:46:50.473968 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="extract-utilities"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.473977 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="extract-utilities"
Nov 28 17:46:50 crc kubenswrapper[4909]: E1128 17:46:50.474011 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="extract-content"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.474018 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="extract-content"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.474232 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b847096-da3c-4b9c-883a-319681d9ebe5" containerName="registry-server"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.475423 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.476835 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.477141 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09" exitCode=0
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.477194 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09"}
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.477219 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"}
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.477234 4909 scope.go:117] "RemoveContainer" containerID="ba4943f4ba136c11fa217eba14fcdb34cf54ee4ef96ee334416ec901f5f4fe45"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.488016 4909 generic.go:334] "Generic (PLEG): container finished" podID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerID="0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b" exitCode=143
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.488119 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerDied","Data":"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b"}
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.493966 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerStarted","Data":"fa51538af27e817a9866c8c69ba6ad509d8a44e76d594b3c80bee1748798783c"}
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.512770 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563739 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563801 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563821 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563853 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-run\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563877 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.563895 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564094 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564196 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564313 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564353 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564394 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564451 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564479 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564522 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxdnm\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-kube-api-access-bxdnm\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.564830 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.574025 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.574004891 podStartE2EDuration="2.574004891s" podCreationTimestamp="2025-11-28 17:46:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:50.569809718 +0000 UTC m=+5792.966494262" watchObservedRunningTime="2025-11-28 17:46:50.574004891 +0000 UTC m=+5792.970689415"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666262 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666266 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666290 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0"
28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666334 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666385 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666426 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666469 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxdnm\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-kube-api-access-bxdnm\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666511 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666544 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666519 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666670 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666766 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666797 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666836 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-run\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666875 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666907 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666983 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667011 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.666421 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667273 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667324 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667351 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: 
I1128 17:46:50.667397 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667409 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-run\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.667591 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d3778829-e25a-4c1a-b6bd-25085ee5e380-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.672272 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.672411 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.673759 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.674526 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.678143 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3778829-e25a-4c1a-b6bd-25085ee5e380-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.688223 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxdnm\" (UniqueName: \"kubernetes.io/projected/d3778829-e25a-4c1a-b6bd-25085ee5e380-kube-api-access-bxdnm\") pod \"cinder-volume-volume1-0\" (UID: \"d3778829-e25a-4c1a-b6bd-25085ee5e380\") " pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:50 crc kubenswrapper[4909]: I1128 17:46:50.799693 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.098945 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.113760 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.120129 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.134267 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187103 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187382 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mb9q\" (UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-kube-api-access-5mb9q\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187445 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187483 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187528 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187552 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-run\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187580 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187615 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-ceph\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187633 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-dev\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187671 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187745 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-sys\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187759 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187777 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187801 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-lib-modules\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187819 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.187841 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-scripts\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289377 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-sys\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 
17:46:51.289426 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289449 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289479 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-lib-modules\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289498 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289529 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-scripts\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289564 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289587 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mb9q\" (UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-kube-api-access-5mb9q\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289611 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289667 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289744 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " 
pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289775 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-run\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289814 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289860 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-ceph\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289881 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-dev\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.289907 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290037 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-nvme\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290081 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-sys\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290121 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290149 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290174 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-lib-modules\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290211 
4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.290501 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.296615 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-run\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.299602 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.302486 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d6109b09-2279-459c-a836-b02bb0156e0a-dev\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.309744 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.312267 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data-custom\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.314456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-config-data\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.314833 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-ceph\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.323167 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6109b09-2279-459c-a836-b02bb0156e0a-scripts\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.338649 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5mb9q\" (UniqueName: \"kubernetes.io/projected/d6109b09-2279-459c-a836-b02bb0156e0a-kube-api-access-5mb9q\") pod \"cinder-backup-0\" (UID: \"d6109b09-2279-459c-a836-b02bb0156e0a\") " pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.430395 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.436634 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.507983 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d3778829-e25a-4c1a-b6bd-25085ee5e380","Type":"ContainerStarted","Data":"e0654643cd7bd4ca8f862049303d4271176287a60ee24c3bf06e07b3adb91e4f"} Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.513873 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerStarted","Data":"2a355f767751ddbf1f390dcc39cd579f2e875df14adf2a76faadc3b98f0f0ebd"} Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.807808 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 17:46:51 crc kubenswrapper[4909]: I1128 17:46:51.841571 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 17:46:52 crc kubenswrapper[4909]: W1128 17:46:52.001718 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6109b09_2279_459c_a836_b02bb0156e0a.slice/crio-12f38c9b89102ddaf231ada48017bac1a4421120b06c6d69a4e2ef872421d5ec WatchSource:0}: Error finding container 12f38c9b89102ddaf231ada48017bac1a4421120b06c6d69a4e2ef872421d5ec: Status 404 returned error can't find the container with id 12f38c9b89102ddaf231ada48017bac1a4421120b06c6d69a4e2ef872421d5ec Nov 28 17:46:52 crc kubenswrapper[4909]: I1128 17:46:52.007868 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 28 17:46:52 crc kubenswrapper[4909]: I1128 17:46:52.524185 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d6109b09-2279-459c-a836-b02bb0156e0a","Type":"ContainerStarted","Data":"12f38c9b89102ddaf231ada48017bac1a4421120b06c6d69a4e2ef872421d5ec"} Nov 28 17:46:52 crc kubenswrapper[4909]: I1128 17:46:52.575093 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.245438 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338094 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338351 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338391 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xh6m\" (UniqueName: \"kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338415 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338464 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338508 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338555 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338621 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle\") pod \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\" (UID: \"5a6ccec4-c48a-4d29-ad8e-cb132e181d86\") " Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.338988 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.339027 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs" (OuterVolumeSpecName: "logs") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.343212 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m" (OuterVolumeSpecName: "kube-api-access-9xh6m") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "kube-api-access-9xh6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.343537 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.344674 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts" (OuterVolumeSpecName: "scripts") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.406222 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.410792 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data" (OuterVolumeSpecName: "config-data") pod "5a6ccec4-c48a-4d29-ad8e-cb132e181d86" (UID: "5a6ccec4-c48a-4d29-ad8e-cb132e181d86"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440038 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xh6m\" (UniqueName: \"kubernetes.io/projected/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-kube-api-access-9xh6m\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440063 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440072 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440080 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440092 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.440101 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a6ccec4-c48a-4d29-ad8e-cb132e181d86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.537390 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d3778829-e25a-4c1a-b6bd-25085ee5e380","Type":"ContainerStarted","Data":"f6cfc3a51d556ad7e90b6eea05bed72886f3539daec8d9f0ea5cf02157273811"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.537969 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d3778829-e25a-4c1a-b6bd-25085ee5e380","Type":"ContainerStarted","Data":"e9db04325ce8ff3ee18f93f89926d2651005c871050949d992bb77551f43933c"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.539792 4909 generic.go:334] "Generic (PLEG): container finished" podID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerID="9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a" exitCode=0 Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.539853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerDied","Data":"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.539878 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a6ccec4-c48a-4d29-ad8e-cb132e181d86","Type":"ContainerDied","Data":"8b18917785a5e6272d19f857f92ff4d21cd67e55f7be2ff4dab20b2b4c9a5903"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.539897 4909 scope.go:117] "RemoveContainer" containerID="9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.540038 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.555457 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d6109b09-2279-459c-a836-b02bb0156e0a","Type":"ContainerStarted","Data":"5d960b7596cf09e39b54a9206368d7a9a2f6cb8723ec73a09f14be128ef53afb"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.555513 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"d6109b09-2279-459c-a836-b02bb0156e0a","Type":"ContainerStarted","Data":"cb06aa7f75c6ab43f71bac936e33a265f2f3d8f65dfc4a9065d9e3dd46bda209"} Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.579922 4909 scope.go:117] "RemoveContainer" containerID="0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.581294 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=2.496012942 podStartE2EDuration="3.581279712s" podCreationTimestamp="2025-11-28 17:46:50 +0000 UTC" firstStartedPulling="2025-11-28 17:46:51.424564382 +0000 UTC m=+5793.821248906" lastFinishedPulling="2025-11-28 17:46:52.509831152 +0000 UTC m=+5794.906515676" observedRunningTime="2025-11-28 17:46:53.580427319 +0000 UTC m=+5795.977111843" watchObservedRunningTime="2025-11-28 17:46:53.581279712 +0000 UTC m=+5795.977964236" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.653248 4909 scope.go:117] "RemoveContainer" containerID="9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a" Nov 28 17:46:53 crc kubenswrapper[4909]: E1128 17:46:53.653768 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a\": container with ID starting with 9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a not found: ID does not exist" containerID="9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.653796 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a"} err="failed to get container status \"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a\": rpc error: code = NotFound desc = could not find container \"9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a\": container with ID starting with 9d2efc6c84db61762d42255c13f0b261b1510b198f46ee001a7501cd0962431a not found: ID does not exist" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.653814 4909 scope.go:117] "RemoveContainer" containerID="0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b" Nov 28 17:46:53 crc kubenswrapper[4909]: E1128 17:46:53.654131 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b\": container with ID starting with 0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b not found: ID does not exist" containerID="0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.654153 4909 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b"} err="failed to get container status \"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b\": rpc error: code = NotFound desc = could not find container \"0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b\": container with ID starting with 0b16538e33190c922f3962621f620fa2a4048e371c3c32c88856b62d826dab8b not found: ID does not exist" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.655193 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=1.629581647 podStartE2EDuration="2.655174195s" podCreationTimestamp="2025-11-28 17:46:51 +0000 UTC" firstStartedPulling="2025-11-28 17:46:52.003842431 +0000 UTC m=+5794.400526955" lastFinishedPulling="2025-11-28 17:46:53.029434979 +0000 UTC m=+5795.426119503" observedRunningTime="2025-11-28 17:46:53.60654754 +0000 UTC m=+5796.003232064" watchObservedRunningTime="2025-11-28 17:46:53.655174195 +0000 UTC m=+5796.051858739" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.662033 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.687724 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.697525 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:53 crc kubenswrapper[4909]: E1128 17:46:53.698111 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.698139 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api" Nov 28 17:46:53 crc kubenswrapper[4909]: E1128 17:46:53.698163 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api-log" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.698171 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api-log" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.698419 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api-log" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.698447 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" containerName="cinder-api" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.699557 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.702633 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.708519 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.767131 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.858320 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data-custom\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.858631 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc695e03-ba92-4ca7-a736-44869be5a553-logs\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.858763 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.858866 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.859123 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-scripts\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.859471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvvqg\" (UniqueName: \"kubernetes.io/projected/fc695e03-ba92-4ca7-a736-44869be5a553-kube-api-access-hvvqg\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.859504 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fc695e03-ba92-4ca7-a736-44869be5a553-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.933914 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a6ccec4-c48a-4d29-ad8e-cb132e181d86" path="/var/lib/kubelet/pods/5a6ccec4-c48a-4d29-ad8e-cb132e181d86/volumes" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960767 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-scripts\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960828 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvvqg\" (UniqueName: \"kubernetes.io/projected/fc695e03-ba92-4ca7-a736-44869be5a553-kube-api-access-hvvqg\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960854 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fc695e03-ba92-4ca7-a736-44869be5a553-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960922 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data-custom\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960954 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc695e03-ba92-4ca7-a736-44869be5a553-logs\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960969 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.960990 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.961497 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fc695e03-ba92-4ca7-a736-44869be5a553-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.961919 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc695e03-ba92-4ca7-a736-44869be5a553-logs\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.967718 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.972283 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.972359 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-scripts\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.980067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvvqg\" (UniqueName: \"kubernetes.io/projected/fc695e03-ba92-4ca7-a736-44869be5a553-kube-api-access-hvvqg\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:53 crc kubenswrapper[4909]: I1128 17:46:53.980135 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fc695e03-ba92-4ca7-a736-44869be5a553-config-data-custom\") pod \"cinder-api-0\" (UID: \"fc695e03-ba92-4ca7-a736-44869be5a553\") " pod="openstack/cinder-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.031732 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: W1128 17:46:54.508797 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc695e03_ba92_4ca7_a736_44869be5a553.slice/crio-f46ee512a6e23a6e6c51c200e788912539e6e17d00a56e4177b86af89d6cc980 WatchSource:0}: Error finding container f46ee512a6e23a6e6c51c200e788912539e6e17d00a56e4177b86af89d6cc980: Status 404 returned error can't find the container with id f46ee512a6e23a6e6c51c200e788912539e6e17d00a56e4177b86af89d6cc980 Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.509258 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.572817 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fc695e03-ba92-4ca7-a736-44869be5a553","Type":"ContainerStarted","Data":"f46ee512a6e23a6e6c51c200e788912539e6e17d00a56e4177b86af89d6cc980"} Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.975782 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.976482 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.977327 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.977365 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.983245 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.985647 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.986046 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 17:46:54 crc 
kubenswrapper[4909]: I1128 17:46:54.986704 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.987485 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 17:46:54 crc kubenswrapper[4909]: I1128 17:46:54.987793 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 17:46:55 crc kubenswrapper[4909]: I1128 17:46:55.606989 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fc695e03-ba92-4ca7-a736-44869be5a553","Type":"ContainerStarted","Data":"5d2e754a282c253144cad515dfe0bd58d1dd1018ae6828f23ef5650d839d82a4"} Nov 28 17:46:55 crc kubenswrapper[4909]: I1128 17:46:55.800641 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 28 17:46:56 crc kubenswrapper[4909]: I1128 17:46:56.437419 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 28 17:46:56 crc kubenswrapper[4909]: I1128 17:46:56.627229 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fc695e03-ba92-4ca7-a736-44869be5a553","Type":"ContainerStarted","Data":"e1e655e151a98191105cb2e8e2945507411cb6ce70d008c2dc5c34d3a99527b9"} Nov 28 17:46:56 crc kubenswrapper[4909]: I1128 17:46:56.674290 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.6742710130000003 podStartE2EDuration="3.674271013s" podCreationTimestamp="2025-11-28 17:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:46:56.658176051 +0000 UTC m=+5799.054860625" watchObservedRunningTime="2025-11-28 17:46:56.674271013 +0000 UTC m=+5799.070955537" Nov 28 17:46:57 crc kubenswrapper[4909]: I1128 17:46:57.637416 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 17:46:58 crc kubenswrapper[4909]: I1128 17:46:58.978369 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 17:46:59 crc kubenswrapper[4909]: I1128 17:46:59.046634 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:46:59 crc kubenswrapper[4909]: I1128 17:46:59.656326 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="cinder-scheduler" containerID="cri-o://fa51538af27e817a9866c8c69ba6ad509d8a44e76d594b3c80bee1748798783c" gracePeriod=30 Nov 28 17:46:59 crc kubenswrapper[4909]: I1128 17:46:59.656450 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="probe" containerID="cri-o://2a355f767751ddbf1f390dcc39cd579f2e875df14adf2a76faadc3b98f0f0ebd" gracePeriod=30 Nov 28 17:47:00 crc kubenswrapper[4909]: I1128 17:47:00.671702 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerID="2a355f767751ddbf1f390dcc39cd579f2e875df14adf2a76faadc3b98f0f0ebd" exitCode=0 Nov 28 17:47:00 crc kubenswrapper[4909]: I1128 17:47:00.671778 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerDied","Data":"2a355f767751ddbf1f390dcc39cd579f2e875df14adf2a76faadc3b98f0f0ebd"} Nov 28 17:47:01 crc kubenswrapper[4909]: I1128 17:47:01.046451 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 28 17:47:01 crc kubenswrapper[4909]: I1128 17:47:01.682538 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerID="fa51538af27e817a9866c8c69ba6ad509d8a44e76d594b3c80bee1748798783c" exitCode=0 Nov 28 17:47:01 crc kubenswrapper[4909]: I1128 17:47:01.682617 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerDied","Data":"fa51538af27e817a9866c8c69ba6ad509d8a44e76d594b3c80bee1748798783c"} Nov 28 17:47:01 crc kubenswrapper[4909]: I1128 17:47:01.700868 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 28 17:47:01 crc kubenswrapper[4909]: I1128 17:47:01.858627 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.035965 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.036350 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.036412 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.036774 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.037584 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.037749 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bg5tk\" (UniqueName: \"kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.037908 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle\") pod \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\" (UID: \"e5e90ac9-38b7-4f4b-9125-d7ddd7516403\") " Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.038827 4909 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.045586 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.045688 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts" (OuterVolumeSpecName: "scripts") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.056229 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk" (OuterVolumeSpecName: "kube-api-access-bg5tk") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "kube-api-access-bg5tk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.102767 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.132319 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data" (OuterVolumeSpecName: "config-data") pod "e5e90ac9-38b7-4f4b-9125-d7ddd7516403" (UID: "e5e90ac9-38b7-4f4b-9125-d7ddd7516403"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.140747 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.140783 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.140792 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bg5tk\" (UniqueName: \"kubernetes.io/projected/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-kube-api-access-bg5tk\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.140802 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.140810 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5e90ac9-38b7-4f4b-9125-d7ddd7516403-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.699752 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e5e90ac9-38b7-4f4b-9125-d7ddd7516403","Type":"ContainerDied","Data":"d00067541a91e894e58032a75f7eaf4f60e4f8d9d14e5ddb0ca32744f6cfd67c"} Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.699844 4909 scope.go:117] "RemoveContainer" containerID="2a355f767751ddbf1f390dcc39cd579f2e875df14adf2a76faadc3b98f0f0ebd" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.700047 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.768734 4909 scope.go:117] "RemoveContainer" containerID="fa51538af27e817a9866c8c69ba6ad509d8a44e76d594b3c80bee1748798783c" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.768886 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.780912 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.802711 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:47:02 crc kubenswrapper[4909]: E1128 17:47:02.803980 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="probe" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.804012 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="probe" Nov 28 17:47:02 crc kubenswrapper[4909]: E1128 17:47:02.804076 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="cinder-scheduler" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.804087 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="cinder-scheduler" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.804353 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="probe" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.804378 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" containerName="cinder-scheduler" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.805767 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.807837 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.820422 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.955193 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02b08df8-7490-4759-8f0b-c7415210385b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.955586 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.955627 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.955769 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-scripts\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.955942 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bf8w\" (UniqueName: \"kubernetes.io/projected/02b08df8-7490-4759-8f0b-c7415210385b-kube-api-access-5bf8w\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:02 crc kubenswrapper[4909]: I1128 17:47:02.956015 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.057867 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02b08df8-7490-4759-8f0b-c7415210385b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058018 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058059 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058078 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02b08df8-7490-4759-8f0b-c7415210385b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058223 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-scripts\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058307 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bf8w\" (UniqueName: \"kubernetes.io/projected/02b08df8-7490-4759-8f0b-c7415210385b-kube-api-access-5bf8w\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.058370 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.066077 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-scripts\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.076210 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.081511 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.082843 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b08df8-7490-4759-8f0b-c7415210385b-config-data\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.089500 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bf8w\" (UniqueName: \"kubernetes.io/projected/02b08df8-7490-4759-8f0b-c7415210385b-kube-api-access-5bf8w\") pod \"cinder-scheduler-0\" (UID: \"02b08df8-7490-4759-8f0b-c7415210385b\") " pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 
crc kubenswrapper[4909]: I1128 17:47:03.128091 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.591868 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 17:47:03 crc kubenswrapper[4909]: W1128 17:47:03.593761 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02b08df8_7490_4759_8f0b_c7415210385b.slice/crio-770c56c766f11774daf62f0e358980780694e68cccf51858c286de1fb4aaa1dd WatchSource:0}: Error finding container 770c56c766f11774daf62f0e358980780694e68cccf51858c286de1fb4aaa1dd: Status 404 returned error can't find the container with id 770c56c766f11774daf62f0e358980780694e68cccf51858c286de1fb4aaa1dd Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.713022 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02b08df8-7490-4759-8f0b-c7415210385b","Type":"ContainerStarted","Data":"770c56c766f11774daf62f0e358980780694e68cccf51858c286de1fb4aaa1dd"} Nov 28 17:47:03 crc kubenswrapper[4909]: I1128 17:47:03.913214 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5e90ac9-38b7-4f4b-9125-d7ddd7516403" path="/var/lib/kubelet/pods/e5e90ac9-38b7-4f4b-9125-d7ddd7516403/volumes" Nov 28 17:47:04 crc kubenswrapper[4909]: I1128 17:47:04.723263 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02b08df8-7490-4759-8f0b-c7415210385b","Type":"ContainerStarted","Data":"c334909ad7b8622077382a5711f2e3c88800aa9411e11c5a7f30761531b4f322"} Nov 28 17:47:05 crc kubenswrapper[4909]: I1128 17:47:05.737137 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02b08df8-7490-4759-8f0b-c7415210385b","Type":"ContainerStarted","Data":"56b29f0d3340f4812403ae68c3f5c755fd77257b27acfe2469b9c0b20bfd0aa7"} Nov 28 17:47:05 crc kubenswrapper[4909]: I1128 17:47:05.766575 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.766552748 podStartE2EDuration="3.766552748s" podCreationTimestamp="2025-11-28 17:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:47:05.753548509 +0000 UTC m=+5808.150233073" watchObservedRunningTime="2025-11-28 17:47:05.766552748 +0000 UTC m=+5808.163237292" Nov 28 17:47:05 crc kubenswrapper[4909]: I1128 17:47:05.913869 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 17:47:08 crc kubenswrapper[4909]: I1128 17:47:08.128442 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 17:47:13 crc kubenswrapper[4909]: I1128 17:47:13.329041 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.872992 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vgwmt"] Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.874669 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.876923 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-x8wtm" Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.878340 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.897179 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgwmt"] Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.924704 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-b7xg2"] Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.927373 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:53 crc kubenswrapper[4909]: I1128 17:48:53.954030 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-b7xg2"] Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033455 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xxkr\" (UniqueName: \"kubernetes.io/projected/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-kube-api-access-8xxkr\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033515 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-scripts\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033603 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-log\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033636 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-etc-ovs\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033684 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-log-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033746 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-lib\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033768 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033824 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-run\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033852 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-scripts\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033867 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.033899 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnrbh\" (UniqueName: \"kubernetes.io/projected/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-kube-api-access-cnrbh\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.135883 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-scripts\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.135928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.135973 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnrbh\" (UniqueName: \"kubernetes.io/projected/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-kube-api-access-cnrbh\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136003 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xxkr\" (UniqueName: \"kubernetes.io/projected/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-kube-api-access-8xxkr\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-scripts\") pod 
\"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136074 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-log\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136096 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-etc-ovs\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136118 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-log-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136143 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-lib\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136163 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136209 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-run\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136353 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-run\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136450 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-etc-ovs\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136507 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: 
\"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-lib\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136559 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-log-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136592 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-var-run-ovn\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.136625 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-var-log\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.137772 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-scripts\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.138407 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-scripts\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.155693 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xxkr\" (UniqueName: \"kubernetes.io/projected/6c05fa8e-ecd9-477f-bf04-c67ca51f425d-kube-api-access-8xxkr\") pod \"ovn-controller-vgwmt\" (UID: \"6c05fa8e-ecd9-477f-bf04-c67ca51f425d\") " pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.162778 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnrbh\" (UniqueName: \"kubernetes.io/projected/f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a-kube-api-access-cnrbh\") pod \"ovn-controller-ovs-b7xg2\" (UID: \"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a\") " pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.201273 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.249872 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-b7xg2" Nov 28 17:48:54 crc kubenswrapper[4909]: I1128 17:48:54.743417 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgwmt"] Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.012961 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgwmt" event={"ID":"6c05fa8e-ecd9-477f-bf04-c67ca51f425d","Type":"ContainerStarted","Data":"dcbf562012c4cf301d19f8e0636f74e14f495170b37095b1d17299eb56daad7c"} Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.065791 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-b7xg2"] Nov 28 17:48:55 crc kubenswrapper[4909]: W1128 17:48:55.074237 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf579a06d_9f20_4fa2_aaaa_9cdfd4b82a9a.slice/crio-a9ebfcd02e3c563fcc5a981a0d3dda6b3c747f2d708059f41f021a1764fe7474 WatchSource:0}: Error finding container a9ebfcd02e3c563fcc5a981a0d3dda6b3c747f2d708059f41f021a1764fe7474: Status 404 returned error can't find the container with id a9ebfcd02e3c563fcc5a981a0d3dda6b3c747f2d708059f41f021a1764fe7474 Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.322837 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-6k4kn"] Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.324877 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.329135 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.337827 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-6k4kn"] Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.466424 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e76f930f-b286-4682-91d2-20a57d3be765-config\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.466491 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovn-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.466531 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qrdn\" (UniqueName: \"kubernetes.io/projected/e76f930f-b286-4682-91d2-20a57d3be765-kube-api-access-9qrdn\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.466668 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovs-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc 
kubenswrapper[4909]: I1128 17:48:55.568371 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovn-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.568446 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qrdn\" (UniqueName: \"kubernetes.io/projected/e76f930f-b286-4682-91d2-20a57d3be765-kube-api-access-9qrdn\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.568577 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovs-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.568645 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e76f930f-b286-4682-91d2-20a57d3be765-config\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.569767 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e76f930f-b286-4682-91d2-20a57d3be765-config\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.569989 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovn-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.570084 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e76f930f-b286-4682-91d2-20a57d3be765-ovs-rundir\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.591862 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qrdn\" (UniqueName: \"kubernetes.io/projected/e76f930f-b286-4682-91d2-20a57d3be765-kube-api-access-9qrdn\") pod \"ovn-controller-metrics-6k4kn\" (UID: \"e76f930f-b286-4682-91d2-20a57d3be765\") " pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:55 crc kubenswrapper[4909]: I1128 17:48:55.650190 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-6k4kn" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.027329 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-b7xg2" event={"ID":"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a","Type":"ContainerStarted","Data":"6de532e41898dba311834f2360991df3bec1ffe7f3bae94b15f667a699504822"} Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.027695 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-b7xg2" event={"ID":"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a","Type":"ContainerStarted","Data":"a9ebfcd02e3c563fcc5a981a0d3dda6b3c747f2d708059f41f021a1764fe7474"} Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.035767 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgwmt" event={"ID":"6c05fa8e-ecd9-477f-bf04-c67ca51f425d","Type":"ContainerStarted","Data":"2ca2b3b6e9dab2b86595e00089e26d25a2a7f1a6b2d78fee7b78c2a410529dd4"} Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.036832 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-vgwmt" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.045199 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-ml6s4"] Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.046741 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.058110 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-ml6s4"] Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.077065 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-vgwmt" podStartSLOduration=3.077045847 podStartE2EDuration="3.077045847s" podCreationTimestamp="2025-11-28 17:48:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:48:56.067115241 +0000 UTC m=+5918.463799775" watchObservedRunningTime="2025-11-28 17:48:56.077045847 +0000 UTC m=+5918.473730371" Nov 28 17:48:56 crc kubenswrapper[4909]: W1128 17:48:56.157318 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode76f930f_b286_4682_91d2_20a57d3be765.slice/crio-0eed749fe6d8bf186bf374c5c4b9661e60c69dccafac8e49e51a16df42d1fd72 WatchSource:0}: Error finding container 0eed749fe6d8bf186bf374c5c4b9661e60c69dccafac8e49e51a16df42d1fd72: Status 404 returned error can't find the container with id 0eed749fe6d8bf186bf374c5c4b9661e60c69dccafac8e49e51a16df42d1fd72 Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.159894 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-6k4kn"] Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.181720 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.181929 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92mrq\" (UniqueName: 
\"kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.283520 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.283916 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92mrq\" (UniqueName: \"kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.286582 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.301554 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92mrq\" (UniqueName: \"kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq\") pod \"octavia-db-create-ml6s4\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") " pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.434962 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-ml6s4" Nov 28 17:48:56 crc kubenswrapper[4909]: W1128 17:48:56.925420 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21636d2d_fea0_4488_8ab1_9f5766ff35ef.slice/crio-ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992 WatchSource:0}: Error finding container ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992: Status 404 returned error can't find the container with id ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992 Nov 28 17:48:56 crc kubenswrapper[4909]: I1128 17:48:56.932574 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-ml6s4"] Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.045669 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-6k4kn" event={"ID":"e76f930f-b286-4682-91d2-20a57d3be765","Type":"ContainerStarted","Data":"16aa6d4d8cb7ef4aab463c3f9f949dbb59d610cd88c0ce66ac2dcfa852f857a5"} Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.045715 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-6k4kn" event={"ID":"e76f930f-b286-4682-91d2-20a57d3be765","Type":"ContainerStarted","Data":"0eed749fe6d8bf186bf374c5c4b9661e60c69dccafac8e49e51a16df42d1fd72"} Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.047482 4909 generic.go:334] "Generic (PLEG): container finished" podID="f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a" containerID="6de532e41898dba311834f2360991df3bec1ffe7f3bae94b15f667a699504822" exitCode=0 Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.047583 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-b7xg2" event={"ID":"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a","Type":"ContainerDied","Data":"6de532e41898dba311834f2360991df3bec1ffe7f3bae94b15f667a699504822"} Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.049100 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-ml6s4" event={"ID":"21636d2d-fea0-4488-8ab1-9f5766ff35ef","Type":"ContainerStarted","Data":"ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992"} Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.083544 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-6k4kn" podStartSLOduration=2.083524544 podStartE2EDuration="2.083524544s" podCreationTimestamp="2025-11-28 17:48:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:48:57.077244925 +0000 UTC m=+5919.473929459" watchObservedRunningTime="2025-11-28 17:48:57.083524544 +0000 UTC m=+5919.480209068" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.402424 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-57ae-account-create-update-scp7k"] Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.410428 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.413110 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-57ae-account-create-update-scp7k"] Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.413272 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.507777 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ll8r\" (UniqueName: \"kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.509202 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.611886 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ll8r\" (UniqueName: \"kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.611982 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.612945 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.631597 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ll8r\" (UniqueName: \"kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r\") pod \"octavia-57ae-account-create-update-scp7k\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") " pod="openstack/octavia-57ae-account-create-update-scp7k" Nov 28 17:48:57 crc kubenswrapper[4909]: I1128 17:48:57.737508 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-57ae-account-create-update-scp7k"
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.060264 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-b7xg2" event={"ID":"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a","Type":"ContainerStarted","Data":"1b8cdb6e8852d8385ec6b6016a7c872b34fae2fdbe5d61fdb63a944464753590"}
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.060556 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-b7xg2" event={"ID":"f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a","Type":"ContainerStarted","Data":"2b1b8a5463af0903383e55e905ae81f801da005ca49ce8a4ce4fc0748f18d2e0"}
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.060609 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-b7xg2"
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.060630 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-b7xg2"
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.063133 4909 generic.go:334] "Generic (PLEG): container finished" podID="21636d2d-fea0-4488-8ab1-9f5766ff35ef" containerID="5027288c6f28cd10a4e0a3b8034e7deaa8cdaa1490b93d2bbf744db24f4e87d0" exitCode=0
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.063940 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-ml6s4" event={"ID":"21636d2d-fea0-4488-8ab1-9f5766ff35ef","Type":"ContainerDied","Data":"5027288c6f28cd10a4e0a3b8034e7deaa8cdaa1490b93d2bbf744db24f4e87d0"}
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.088228 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-b7xg2" podStartSLOduration=5.0882105 podStartE2EDuration="5.0882105s" podCreationTimestamp="2025-11-28 17:48:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:48:58.077423261 +0000 UTC m=+5920.474107785" watchObservedRunningTime="2025-11-28 17:48:58.0882105 +0000 UTC m=+5920.484895014"
Nov 28 17:48:58 crc kubenswrapper[4909]: I1128 17:48:58.215897 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-57ae-account-create-update-scp7k"]
Nov 28 17:48:58 crc kubenswrapper[4909]: W1128 17:48:58.232348 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60dc21da_1ae5_49b5_88d2_a8f7bd552cc2.slice/crio-0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda WatchSource:0}: Error finding container 0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda: Status 404 returned error can't find the container with id 0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.079796 4909 generic.go:334] "Generic (PLEG): container finished" podID="60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" containerID="36aed24fa244a6eff805b588671dd1bdf99853668507bdb37dc2cce9f6c1a619" exitCode=0
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.079888 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-57ae-account-create-update-scp7k" event={"ID":"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2","Type":"ContainerDied","Data":"36aed24fa244a6eff805b588671dd1bdf99853668507bdb37dc2cce9f6c1a619"}
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.080380 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-57ae-account-create-update-scp7k" event={"ID":"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2","Type":"ContainerStarted","Data":"0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda"}
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.546028 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-ml6s4"
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.655224 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts\") pod \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") "
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.655338 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92mrq\" (UniqueName: \"kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq\") pod \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\" (UID: \"21636d2d-fea0-4488-8ab1-9f5766ff35ef\") "
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.656390 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "21636d2d-fea0-4488-8ab1-9f5766ff35ef" (UID: "21636d2d-fea0-4488-8ab1-9f5766ff35ef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.662479 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq" (OuterVolumeSpecName: "kube-api-access-92mrq") pod "21636d2d-fea0-4488-8ab1-9f5766ff35ef" (UID: "21636d2d-fea0-4488-8ab1-9f5766ff35ef"). InnerVolumeSpecName "kube-api-access-92mrq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.758259 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/21636d2d-fea0-4488-8ab1-9f5766ff35ef-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:48:59 crc kubenswrapper[4909]: I1128 17:48:59.758294 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92mrq\" (UniqueName: \"kubernetes.io/projected/21636d2d-fea0-4488-8ab1-9f5766ff35ef-kube-api-access-92mrq\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.096368 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-ml6s4"
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.096580 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-ml6s4" event={"ID":"21636d2d-fea0-4488-8ab1-9f5766ff35ef","Type":"ContainerDied","Data":"ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992"}
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.097080 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce9ecd2f4140ab561e63f2862418abcb97bb70464d3d588d65eec883e97bf992"
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.532238 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-57ae-account-create-update-scp7k"
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.677774 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts\") pod \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") "
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.678061 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ll8r\" (UniqueName: \"kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r\") pod \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\" (UID: \"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2\") "
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.678964 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" (UID: "60dc21da-1ae5-49b5-88d2-a8f7bd552cc2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.681354 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.684033 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r" (OuterVolumeSpecName: "kube-api-access-7ll8r") pod "60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" (UID: "60dc21da-1ae5-49b5-88d2-a8f7bd552cc2"). InnerVolumeSpecName "kube-api-access-7ll8r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:49:00 crc kubenswrapper[4909]: I1128 17:49:00.783257 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ll8r\" (UniqueName: \"kubernetes.io/projected/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2-kube-api-access-7ll8r\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:01 crc kubenswrapper[4909]: I1128 17:49:01.106847 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-57ae-account-create-update-scp7k" event={"ID":"60dc21da-1ae5-49b5-88d2-a8f7bd552cc2","Type":"ContainerDied","Data":"0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda"}
Nov 28 17:49:01 crc kubenswrapper[4909]: I1128 17:49:01.107189 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0711d4fba0720d7db3f81378b53cc7624237674a5ded773409f1daba63f19fda"
Nov 28 17:49:01 crc kubenswrapper[4909]: I1128 17:49:01.106934 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-57ae-account-create-update-scp7k"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.062305 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-vgrrt"]
Nov 28 17:49:03 crc kubenswrapper[4909]: E1128 17:49:03.064733 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21636d2d-fea0-4488-8ab1-9f5766ff35ef" containerName="mariadb-database-create"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.064765 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="21636d2d-fea0-4488-8ab1-9f5766ff35ef" containerName="mariadb-database-create"
Nov 28 17:49:03 crc kubenswrapper[4909]: E1128 17:49:03.064798 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" containerName="mariadb-account-create-update"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.064808 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" containerName="mariadb-account-create-update"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.066995 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" containerName="mariadb-account-create-update"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.067037 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="21636d2d-fea0-4488-8ab1-9f5766ff35ef" containerName="mariadb-database-create"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.085813 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.104172 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-wwpvc"]
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.114704 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-722f-account-create-update-f7nk9"]
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.132218 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-wwpvc"]
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.163718 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-722f-account-create-update-f7nk9"]
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.173376 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-vgrrt"]
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.233392 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.233478 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f66xv\" (UniqueName: \"kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.338070 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.338281 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f66xv\" (UniqueName: \"kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.338895 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.371014 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f66xv\" (UniqueName: \"kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv\") pod \"octavia-persistence-db-create-vgrrt\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") " pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.452720 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.919722 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="505dcabe-949d-4084-af7c-bb46f054ab68" path="/var/lib/kubelet/pods/505dcabe-949d-4084-af7c-bb46f054ab68/volumes"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.920791 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbb1a892-57b8-4458-836f-3f01bc129873" path="/var/lib/kubelet/pods/bbb1a892-57b8-4458-836f-3f01bc129873/volumes"
Nov 28 17:49:03 crc kubenswrapper[4909]: I1128 17:49:03.969842 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-vgrrt"]
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.103756 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-72f1-account-create-update-lkr8q"]
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.105471 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.109008 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.112853 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-72f1-account-create-update-lkr8q"]
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.167206 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-vgrrt" event={"ID":"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea","Type":"ContainerStarted","Data":"33c837bf8ee3546ab8e0d9f14858a28df3e87d8fb8ac96edf1885f6c751b9a00"}
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.253651 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdzpn\" (UniqueName: \"kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.253827 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.355362 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.355574 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdzpn\" (UniqueName: \"kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.356240 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.379585 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdzpn\" (UniqueName: \"kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn\") pod \"octavia-72f1-account-create-update-lkr8q\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") " pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.454027 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:04 crc kubenswrapper[4909]: I1128 17:49:04.946806 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-72f1-account-create-update-lkr8q"]
Nov 28 17:49:05 crc kubenswrapper[4909]: I1128 17:49:05.181752 4909 generic.go:334] "Generic (PLEG): container finished" podID="50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" containerID="5c213a55efa3551857deb4916353d765fabd979bc4098942f0ee1721a2a3f6bb" exitCode=0
Nov 28 17:49:05 crc kubenswrapper[4909]: I1128 17:49:05.181840 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-vgrrt" event={"ID":"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea","Type":"ContainerDied","Data":"5c213a55efa3551857deb4916353d765fabd979bc4098942f0ee1721a2a3f6bb"}
Nov 28 17:49:05 crc kubenswrapper[4909]: I1128 17:49:05.184139 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-72f1-account-create-update-lkr8q" event={"ID":"b0da79de-6f24-4b07-ab86-215469b6e162","Type":"ContainerStarted","Data":"3990e620e1d3ad8d3aec7f208b5119268a7365c254b48068f983d0c58090a93a"}
Nov 28 17:49:05 crc kubenswrapper[4909]: I1128 17:49:05.184170 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-72f1-account-create-update-lkr8q" event={"ID":"b0da79de-6f24-4b07-ab86-215469b6e162","Type":"ContainerStarted","Data":"7009839bf1f427d08a444d343a751715bb5b0301a56f4a48f54e2d42cf4f82a4"}
Nov 28 17:49:05 crc kubenswrapper[4909]: I1128 17:49:05.235519 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-72f1-account-create-update-lkr8q" podStartSLOduration=1.235488417 podStartE2EDuration="1.235488417s" podCreationTimestamp="2025-11-28 17:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:49:05.218633934 +0000 UTC m=+5927.615318458" watchObservedRunningTime="2025-11-28 17:49:05.235488417 +0000 UTC m=+5927.632172981"
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.197689 4909 generic.go:334] "Generic (PLEG): container finished" podID="b0da79de-6f24-4b07-ab86-215469b6e162" containerID="3990e620e1d3ad8d3aec7f208b5119268a7365c254b48068f983d0c58090a93a" exitCode=0
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.197835 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-72f1-account-create-update-lkr8q" event={"ID":"b0da79de-6f24-4b07-ab86-215469b6e162","Type":"ContainerDied","Data":"3990e620e1d3ad8d3aec7f208b5119268a7365c254b48068f983d0c58090a93a"}
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.547578 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.608934 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts\") pod \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") "
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.609073 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f66xv\" (UniqueName: \"kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv\") pod \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\" (UID: \"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea\") "
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.612104 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" (UID: "50f7d8e1-a601-4fb0-894c-ceaeba78f7ea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.619028 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv" (OuterVolumeSpecName: "kube-api-access-f66xv") pod "50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" (UID: "50f7d8e1-a601-4fb0-894c-ceaeba78f7ea"). InnerVolumeSpecName "kube-api-access-f66xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.712838 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f66xv\" (UniqueName: \"kubernetes.io/projected/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-kube-api-access-f66xv\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:06 crc kubenswrapper[4909]: I1128 17:49:06.712889 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.214082 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-vgrrt"
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.214101 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-vgrrt" event={"ID":"50f7d8e1-a601-4fb0-894c-ceaeba78f7ea","Type":"ContainerDied","Data":"33c837bf8ee3546ab8e0d9f14858a28df3e87d8fb8ac96edf1885f6c751b9a00"}
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.214163 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33c837bf8ee3546ab8e0d9f14858a28df3e87d8fb8ac96edf1885f6c751b9a00"
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.561747 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.640513 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdzpn\" (UniqueName: \"kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn\") pod \"b0da79de-6f24-4b07-ab86-215469b6e162\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") "
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.640593 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts\") pod \"b0da79de-6f24-4b07-ab86-215469b6e162\" (UID: \"b0da79de-6f24-4b07-ab86-215469b6e162\") "
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.641403 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0da79de-6f24-4b07-ab86-215469b6e162" (UID: "b0da79de-6f24-4b07-ab86-215469b6e162"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.647359 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn" (OuterVolumeSpecName: "kube-api-access-cdzpn") pod "b0da79de-6f24-4b07-ab86-215469b6e162" (UID: "b0da79de-6f24-4b07-ab86-215469b6e162"). InnerVolumeSpecName "kube-api-access-cdzpn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.742765 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0da79de-6f24-4b07-ab86-215469b6e162-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:07 crc kubenswrapper[4909]: I1128 17:49:07.742815 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdzpn\" (UniqueName: \"kubernetes.io/projected/b0da79de-6f24-4b07-ab86-215469b6e162-kube-api-access-cdzpn\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:08 crc kubenswrapper[4909]: I1128 17:49:08.230272 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-72f1-account-create-update-lkr8q" event={"ID":"b0da79de-6f24-4b07-ab86-215469b6e162","Type":"ContainerDied","Data":"7009839bf1f427d08a444d343a751715bb5b0301a56f4a48f54e2d42cf4f82a4"}
Nov 28 17:49:08 crc kubenswrapper[4909]: I1128 17:49:08.230530 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7009839bf1f427d08a444d343a751715bb5b0301a56f4a48f54e2d42cf4f82a4"
Nov 28 17:49:08 crc kubenswrapper[4909]: I1128 17:49:08.230356 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-72f1-account-create-update-lkr8q"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.797457 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-6fbcff57c4-cnfb7"]
Nov 28 17:49:09 crc kubenswrapper[4909]: E1128 17:49:09.798230 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" containerName="mariadb-database-create"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.798282 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" containerName="mariadb-database-create"
Nov 28 17:49:09 crc kubenswrapper[4909]: E1128 17:49:09.798335 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0da79de-6f24-4b07-ab86-215469b6e162" containerName="mariadb-account-create-update"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.798348 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0da79de-6f24-4b07-ab86-215469b6e162" containerName="mariadb-account-create-update"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.798691 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0da79de-6f24-4b07-ab86-215469b6e162" containerName="mariadb-account-create-update"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.798728 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" containerName="mariadb-database-create"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.803903 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.806720 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.807724 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-x5kdg"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.807897 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.841002 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-6fbcff57c4-cnfb7"]
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.887043 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.887120 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-scripts\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.887155 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data-merged\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.887183 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-combined-ca-bundle\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.887298 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-octavia-run\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989198 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989283 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-scripts\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989324 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data-merged\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989358 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-combined-ca-bundle\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989508 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-octavia-run\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.989919 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data-merged\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.990013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-octavia-run\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.996616 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-config-data\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.997341 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-combined-ca-bundle\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:09 crc kubenswrapper[4909]: I1128 17:49:09.998219 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33-scripts\") pod \"octavia-api-6fbcff57c4-cnfb7\" (UID: \"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33\") " pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:10 crc kubenswrapper[4909]: I1128 17:49:10.043101 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-6nvlj"]
Nov 28 17:49:10 crc kubenswrapper[4909]: I1128 17:49:10.051980 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-6nvlj"]
Nov 28 17:49:10 crc kubenswrapper[4909]: I1128 17:49:10.137923 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:10 crc kubenswrapper[4909]: I1128 17:49:10.628443 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-6fbcff57c4-cnfb7"]
Nov 28 17:49:10 crc kubenswrapper[4909]: W1128 17:49:10.630809 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d5c1cf2_bc57_40ac_84d4_8e3c82d04e33.slice/crio-5e748d5274ffd7c47308047cae2be6d143ff528857c1d4e2e9a07d868866c3fd WatchSource:0}: Error finding container 5e748d5274ffd7c47308047cae2be6d143ff528857c1d4e2e9a07d868866c3fd: Status 404 returned error can't find the container with id 5e748d5274ffd7c47308047cae2be6d143ff528857c1d4e2e9a07d868866c3fd
Nov 28 17:49:11 crc kubenswrapper[4909]: I1128 17:49:11.263765 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-6fbcff57c4-cnfb7" event={"ID":"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33","Type":"ContainerStarted","Data":"5e748d5274ffd7c47308047cae2be6d143ff528857c1d4e2e9a07d868866c3fd"}
Nov 28 17:49:11 crc kubenswrapper[4909]: I1128 17:49:11.915628 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5384a5e-a9a1-4beb-a760-4b15d072f465" path="/var/lib/kubelet/pods/e5384a5e-a9a1-4beb-a760-4b15d072f465/volumes"
Nov 28 17:49:19 crc kubenswrapper[4909]: I1128 17:49:19.366955 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-6fbcff57c4-cnfb7" event={"ID":"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33","Type":"ContainerStarted","Data":"7ef1630e877fb8e021bd9ea0b1f1709d9c8c622238dabd8c046c70af4778a19c"}
Nov 28 17:49:19 crc kubenswrapper[4909]: I1128 17:49:19.910924 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:49:19 crc kubenswrapper[4909]: I1128 17:49:19.911288 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:49:20 crc kubenswrapper[4909]: I1128 17:49:20.377361 4909 generic.go:334] "Generic (PLEG): container finished" podID="7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33" containerID="7ef1630e877fb8e021bd9ea0b1f1709d9c8c622238dabd8c046c70af4778a19c" exitCode=0
Nov 28 17:49:20 crc kubenswrapper[4909]: I1128 17:49:20.377401 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-6fbcff57c4-cnfb7" event={"ID":"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33","Type":"ContainerDied","Data":"7ef1630e877fb8e021bd9ea0b1f1709d9c8c622238dabd8c046c70af4778a19c"}
Nov 28 17:49:21 crc kubenswrapper[4909]: I1128 17:49:21.388415 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-6fbcff57c4-cnfb7" event={"ID":"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33","Type":"ContainerStarted","Data":"844f9646ec4aad17acf0a0b8b631692618401e0bc194a634ab87af57c21adf15"}
Nov 28 17:49:21 crc kubenswrapper[4909]: I1128 17:49:21.388970 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-6fbcff57c4-cnfb7" event={"ID":"7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33","Type":"ContainerStarted","Data":"549c23e42c319918dd21a6f8f4d9da091cf7e991fa3826e48f68eab220728e9b"}
Nov 28 17:49:21 crc kubenswrapper[4909]: I1128 17:49:21.388990 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:21 crc kubenswrapper[4909]: I1128 17:49:21.389000 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:21 crc kubenswrapper[4909]: I1128 17:49:21.417432 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-6fbcff57c4-cnfb7" podStartSLOduration=3.982736208 podStartE2EDuration="12.417405082s" podCreationTimestamp="2025-11-28 17:49:09 +0000 UTC" firstStartedPulling="2025-11-28 17:49:10.633630914 +0000 UTC m=+5933.030315438" lastFinishedPulling="2025-11-28 17:49:19.068299788 +0000 UTC m=+5941.464984312" observedRunningTime="2025-11-28 17:49:21.414062972 +0000 UTC m=+5943.810747496" watchObservedRunningTime="2025-11-28 17:49:21.417405082 +0000 UTC m=+5943.814089616"
Nov 28 17:49:22 crc kubenswrapper[4909]: I1128 17:49:22.050790 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l7db9"]
Nov 28 17:49:22 crc kubenswrapper[4909]: I1128 17:49:22.063390 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l7db9"]
Nov 28 17:49:23 crc kubenswrapper[4909]: I1128 17:49:23.457873 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-9vnqw" podUID="771c530f-d0ab-412d-a6c6-931999bc878f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 17:49:23 crc kubenswrapper[4909]: I1128 17:49:23.920064 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="473b34a3-d6d9-4f27-946c-707ffe7641ed" path="/var/lib/kubelet/pods/473b34a3-d6d9-4f27-946c-707ffe7641ed/volumes"
Nov 28 17:49:28 crc kubenswrapper[4909]: I1128 17:49:28.996275 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-7r7s9"]
Nov 28 17:49:28 crc kubenswrapper[4909]: I1128 17:49:28.998482 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.000378 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.000597 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.000738 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.013438 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7r7s9"]
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.053980 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ccb4dffe-4790-45bb-ba07-e38a24c64022-hm-ports\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.054031 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.054099 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-scripts\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.054165 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data-merged\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.156054 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-scripts\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.156131 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data-merged\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.156246 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ccb4dffe-4790-45bb-ba07-e38a24c64022-hm-ports\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.156270 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.156780 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data-merged\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.157250 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ccb4dffe-4790-45bb-ba07-e38a24c64022-hm-ports\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.161551 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-config-data\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.164387 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccb4dffe-4790-45bb-ba07-e38a24c64022-scripts\") pod \"octavia-rsyslog-7r7s9\" (UID: \"ccb4dffe-4790-45bb-ba07-e38a24c64022\") " pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.182335 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.230750 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-6fbcff57c4-cnfb7"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.273246 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-vgwmt" podUID="6c05fa8e-ecd9-477f-bf04-c67ca51f425d" containerName="ovn-controller" probeResult="failure" output=<
Nov 28 17:49:29 crc kubenswrapper[4909]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 28 17:49:29 crc kubenswrapper[4909]: >
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.318100 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.324056 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-b7xg2"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.342238 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-b7xg2"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.455700 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vgwmt-config-djzlc"]
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.456931 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.468553 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgwmt-config-djzlc"]
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.469057 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568567 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568671 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568741 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568801 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6vk5\" (UniqueName: \"kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568833 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.568859 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.670724 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6vk5\" (UniqueName: \"kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.670975 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671305 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671326 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671378 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671385 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671440 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671537 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.671703 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.672097 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.673730 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.688101 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6vk5\" (UniqueName: \"kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5\") pod \"ovn-controller-vgwmt-config-djzlc\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") " pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.756635 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"]
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.758337 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.762377 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.768540 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"]
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.840360 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.877504 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.877690 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.979159 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.979436 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.980780 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:29 crc kubenswrapper[4909]: I1128 17:49:29.987197 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config\") pod \"octavia-image-upload-59f8cff499-xjdgz\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.018537 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7r7s9"]
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.088857 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xjdgz"
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.405588 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgwmt-config-djzlc"]
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.487090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7r7s9" event={"ID":"ccb4dffe-4790-45bb-ba07-e38a24c64022","Type":"ContainerStarted","Data":"d9f7c0f084f49932a5787fb78c5679b90afc7b755e35d0f89af4b1cdd97b556c"}
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.490885 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgwmt-config-djzlc" event={"ID":"9b0c8258-de97-4775-a1f4-b479249d6cb6","Type":"ContainerStarted","Data":"ddda7f754e0ecb535391092ed9b3873d10b1da585506db95775db53a4280f425"}
Nov 28 17:49:30 crc kubenswrapper[4909]: I1128 17:49:30.520860 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"]
Nov 28 17:49:30 crc kubenswrapper[4909]: W1128 17:49:30.537798 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda236d9b4_0661_4561_aabd_4c45d150c2ce.slice/crio-1b11253488a4e40788825bc566de8f104b1e234b4a764cfeedbbcccb88fd63e9 WatchSource:0}: Error finding container 1b11253488a4e40788825bc566de8f104b1e234b4a764cfeedbbcccb88fd63e9: Status 404 returned error can't find the container with id 1b11253488a4e40788825bc566de8f104b1e234b4a764cfeedbbcccb88fd63e9
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.195340 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-vz6hh"]
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.197605 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.201064 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.203574 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-vz6hh"]
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.303922 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.304057 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.304110 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.304162 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.406047 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.406164 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.406214 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.406263 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.406872 4909 operation_generator.go:637] "MountVolume.SetUp succeeded
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh" Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.411811 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh" Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.412261 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh" Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.412800 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts\") pod \"octavia-db-sync-vz6hh\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") " pod="openstack/octavia-db-sync-vz6hh" Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.500519 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerStarted","Data":"1b11253488a4e40788825bc566de8f104b1e234b4a764cfeedbbcccb88fd63e9"} Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.503062 4909 generic.go:334] "Generic (PLEG): container finished" podID="9b0c8258-de97-4775-a1f4-b479249d6cb6" containerID="901214c22a53b1a5aba0637cb474a8013e7cdee1d881a1b9a5168ea372206b7c" exitCode=0 Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.503100 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgwmt-config-djzlc" event={"ID":"9b0c8258-de97-4775-a1f4-b479249d6cb6","Type":"ContainerDied","Data":"901214c22a53b1a5aba0637cb474a8013e7cdee1d881a1b9a5168ea372206b7c"} Nov 28 17:49:31 crc kubenswrapper[4909]: I1128 17:49:31.529295 4909 util.go:30] "No sandbox for pod can be found. 
Nov 28 17:49:32 crc kubenswrapper[4909]: I1128 17:49:32.107287 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-vz6hh"]
Nov 28 17:49:32 crc kubenswrapper[4909]: I1128 17:49:32.534954 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7r7s9" event={"ID":"ccb4dffe-4790-45bb-ba07-e38a24c64022","Type":"ContainerStarted","Data":"5c4cb5f8a8d5d11a8b24d6aab70c316c93d55af5e20d71122ec6239b2a868592"}
Nov 28 17:49:32 crc kubenswrapper[4909]: I1128 17:49:32.945213 4909 scope.go:117] "RemoveContainer" containerID="af8c707d13707ef6a2121caee72f7e2db770c9c44031bdfefc8e522dc19f843b"
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.545629 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-vz6hh" event={"ID":"44ec722e-82ca-4adf-b196-c76a6d192768","Type":"ContainerStarted","Data":"3d366ecb76199a35b79f3dfd64ab8edce4ba08ea8ad0d91aa1ba606f449c00cd"}
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.592548 4909 scope.go:117] "RemoveContainer" containerID="5884c2188f4a95ea79c9b961064a62dfa885997f4ee9ec83988a2575a447add5"
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.666363 4909 scope.go:117] "RemoveContainer" containerID="332f0539b12161a23f9745305082c8f4a398c34867c7ee4c6ff4c0ebde858110"
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.666943 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgwmt-config-djzlc"
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.750393 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.750581 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6vk5\" (UniqueName: \"kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.751149 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.751351 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.751464 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.751594 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn\") pod \"9b0c8258-de97-4775-a1f4-b479249d6cb6\" (UID: \"9b0c8258-de97-4775-a1f4-b479249d6cb6\") "
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.752029 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.752106 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts" (OuterVolumeSpecName: "scripts") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.752173 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.753066 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run" (OuterVolumeSpecName: "var-run") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.753387 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.759895 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5" (OuterVolumeSpecName: "kube-api-access-k6vk5") pod "9b0c8258-de97-4775-a1f4-b479249d6cb6" (UID: "9b0c8258-de97-4775-a1f4-b479249d6cb6"). InnerVolumeSpecName "kube-api-access-k6vk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.762494 4909 scope.go:117] "RemoveContainer" containerID="916775bf3e278027a629e5a16d9c467f2827588e58d64d2ed0fd842354e2a8a9"
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853184 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853212 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6vk5\" (UniqueName: \"kubernetes.io/projected/9b0c8258-de97-4775-a1f4-b479249d6cb6-kube-api-access-k6vk5\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853223 4909 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b0c8258-de97-4775-a1f4-b479249d6cb6-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853252 4909 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853261 4909 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:33 crc kubenswrapper[4909]: I1128 17:49:33.853271 4909 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b0c8258-de97-4775-a1f4-b479249d6cb6-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 17:49:34 crc kubenswrapper[4909]: E1128 17:49:34.176299 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b0c8258_de97_4775_a1f4_b479249d6cb6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b0c8258_de97_4775_a1f4_b479249d6cb6.slice/crio-ddda7f754e0ecb535391092ed9b3873d10b1da585506db95775db53a4280f425\": RecentStats: unable to find data in memory cache]"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.249511 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-srrcl"]
Nov 28 17:49:34 crc kubenswrapper[4909]: E1128 17:49:34.250036 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b0c8258-de97-4775-a1f4-b479249d6cb6" containerName="ovn-config"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.250056 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b0c8258-de97-4775-a1f4-b479249d6cb6" containerName="ovn-config"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.250275 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b0c8258-de97-4775-a1f4-b479249d6cb6" containerName="ovn-config"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.251520 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.254473 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.254609 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.262578 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.298566 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-srrcl"]
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.309890 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-vgwmt"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365206 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-scripts\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365255 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data-merged\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365339 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-combined-ca-bundle\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365378 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/7da68717-3e7e-4652-be23-4662a4fcf3e8-hm-ports\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365405 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.365470 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-amphora-certs\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467301 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-scripts\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl"
\"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-scripts\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467345 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data-merged\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467409 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-combined-ca-bundle\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467446 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/7da68717-3e7e-4652-be23-4662a4fcf3e8-hm-ports\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467470 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467538 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-amphora-certs\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.467867 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data-merged\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.468616 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/7da68717-3e7e-4652-be23-4662a4fcf3e8-hm-ports\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.474292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-config-data\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.474362 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-combined-ca-bundle\") pod \"octavia-healthmanager-srrcl\" (UID: 
\"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.486619 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-amphora-certs\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.486965 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da68717-3e7e-4652-be23-4662a4fcf3e8-scripts\") pod \"octavia-healthmanager-srrcl\" (UID: \"7da68717-3e7e-4652-be23-4662a4fcf3e8\") " pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.564794 4909 generic.go:334] "Generic (PLEG): container finished" podID="ccb4dffe-4790-45bb-ba07-e38a24c64022" containerID="5c4cb5f8a8d5d11a8b24d6aab70c316c93d55af5e20d71122ec6239b2a868592" exitCode=0 Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.564864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7r7s9" event={"ID":"ccb4dffe-4790-45bb-ba07-e38a24c64022","Type":"ContainerDied","Data":"5c4cb5f8a8d5d11a8b24d6aab70c316c93d55af5e20d71122ec6239b2a868592"} Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.569741 4909 generic.go:334] "Generic (PLEG): container finished" podID="44ec722e-82ca-4adf-b196-c76a6d192768" containerID="fa938d3befb77f50b7d816f18ba395a3c8ccbbf9a17492a0bc474afee0aa4f28" exitCode=0 Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.569796 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-vz6hh" event={"ID":"44ec722e-82ca-4adf-b196-c76a6d192768","Type":"ContainerDied","Data":"fa938d3befb77f50b7d816f18ba395a3c8ccbbf9a17492a0bc474afee0aa4f28"} Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.573171 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgwmt-config-djzlc" event={"ID":"9b0c8258-de97-4775-a1f4-b479249d6cb6","Type":"ContainerDied","Data":"ddda7f754e0ecb535391092ed9b3873d10b1da585506db95775db53a4280f425"} Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.573191 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddda7f754e0ecb535391092ed9b3873d10b1da585506db95775db53a4280f425" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.573232 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgwmt-config-djzlc" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.599431 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-srrcl" Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.768606 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vgwmt-config-djzlc"] Nov 28 17:49:34 crc kubenswrapper[4909]: I1128 17:49:34.784485 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vgwmt-config-djzlc"] Nov 28 17:49:35 crc kubenswrapper[4909]: I1128 17:49:35.138962 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-srrcl"] Nov 28 17:49:35 crc kubenswrapper[4909]: I1128 17:49:35.585089 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-vz6hh" event={"ID":"44ec722e-82ca-4adf-b196-c76a6d192768","Type":"ContainerStarted","Data":"6b4711ed370ed0e1dc30137af9f9ca29cc9903c95f654f9237ac5085edce7b96"} Nov 28 17:49:35 crc kubenswrapper[4909]: I1128 17:49:35.589289 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-srrcl" event={"ID":"7da68717-3e7e-4652-be23-4662a4fcf3e8","Type":"ContainerStarted","Data":"07a96a3ac664fe3d6137038a5e181957fcd7c92d6faa6bbeb042dc7bb6b1a938"} Nov 28 17:49:35 crc kubenswrapper[4909]: I1128 17:49:35.615371 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-vz6hh" podStartSLOduration=4.615353322 podStartE2EDuration="4.615353322s" podCreationTimestamp="2025-11-28 17:49:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:49:35.605290672 +0000 UTC m=+5958.001975206" watchObservedRunningTime="2025-11-28 17:49:35.615353322 +0000 UTC m=+5958.012037846" Nov 28 17:49:35 crc kubenswrapper[4909]: I1128 17:49:35.926614 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b0c8258-de97-4775-a1f4-b479249d6cb6" path="/var/lib/kubelet/pods/9b0c8258-de97-4775-a1f4-b479249d6cb6/volumes" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.394771 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-v8g4q"] Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.397125 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.399915 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.402721 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.412479 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-v8g4q"] Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.510928 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/443cff50-0670-48ec-9602-a49de3990b65-hm-ports\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.511022 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-amphora-certs\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.511156 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-scripts\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.511189 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-config-data\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.511311 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-combined-ca-bundle\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.511393 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443cff50-0670-48ec-9602-a49de3990b65-config-data-merged\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.600713 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-srrcl" event={"ID":"7da68717-3e7e-4652-be23-4662a4fcf3e8","Type":"ContainerStarted","Data":"fa7dd8207812fedefbf688ed2f5a8209fcac4bf1bd92453cb827a95ac04bb1a9"} Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616349 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-combined-ca-bundle\") pod 
\"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616446 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443cff50-0670-48ec-9602-a49de3990b65-config-data-merged\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616519 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/443cff50-0670-48ec-9602-a49de3990b65-hm-ports\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616583 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-amphora-certs\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616643 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-scripts\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.616702 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-config-data\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.618437 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/443cff50-0670-48ec-9602-a49de3990b65-hm-ports\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.618994 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443cff50-0670-48ec-9602-a49de3990b65-config-data-merged\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.623891 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-amphora-certs\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.624968 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-config-data\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 
Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.653685 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443cff50-0670-48ec-9602-a49de3990b65-scripts\") pod \"octavia-housekeeping-v8g4q\" (UID: \"443cff50-0670-48ec-9602-a49de3990b65\") " pod="openstack/octavia-housekeeping-v8g4q"
Nov 28 17:49:36 crc kubenswrapper[4909]: I1128 17:49:36.715442 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-v8g4q"
Nov 28 17:49:37 crc kubenswrapper[4909]: W1128 17:49:37.435109 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod443cff50_0670_48ec_9602_a49de3990b65.slice/crio-9caa55c11fbb5e48190445c6ac8b93c9cf3876f3b91057393665d2f2b9b3fa7a WatchSource:0}: Error finding container 9caa55c11fbb5e48190445c6ac8b93c9cf3876f3b91057393665d2f2b9b3fa7a: Status 404 returned error can't find the container with id 9caa55c11fbb5e48190445c6ac8b93c9cf3876f3b91057393665d2f2b9b3fa7a
Nov 28 17:49:37 crc kubenswrapper[4909]: I1128 17:49:37.441911 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-v8g4q"]
Nov 28 17:49:37 crc kubenswrapper[4909]: I1128 17:49:37.612689 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-v8g4q" event={"ID":"443cff50-0670-48ec-9602-a49de3990b65","Type":"ContainerStarted","Data":"9caa55c11fbb5e48190445c6ac8b93c9cf3876f3b91057393665d2f2b9b3fa7a"}
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.634319 4909 generic.go:334] "Generic (PLEG): container finished" podID="7da68717-3e7e-4652-be23-4662a4fcf3e8" containerID="fa7dd8207812fedefbf688ed2f5a8209fcac4bf1bd92453cb827a95ac04bb1a9" exitCode=0
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.634404 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-srrcl" event={"ID":"7da68717-3e7e-4652-be23-4662a4fcf3e8","Type":"ContainerDied","Data":"fa7dd8207812fedefbf688ed2f5a8209fcac4bf1bd92453cb827a95ac04bb1a9"}
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.819512 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"]
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.825948 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.851065 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"]
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.862565 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgq7r\" (UniqueName: \"kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.862642 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.862762 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.963801 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.963917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgq7r\" (UniqueName: \"kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.964025 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.964319 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.964547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:38 crc kubenswrapper[4909]: I1128 17:49:38.989871 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgq7r\" (UniqueName: \"kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r\") pod \"redhat-operators-hjphz\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.156467 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hjphz"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.500050 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-dbch8"]
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.502402 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.507336 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.507432 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.547057 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-dbch8"]
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.648638 4909 generic.go:334] "Generic (PLEG): container finished" podID="44ec722e-82ca-4adf-b196-c76a6d192768" containerID="6b4711ed370ed0e1dc30137af9f9ca29cc9903c95f654f9237ac5085edce7b96" exitCode=0
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.648706 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-vz6hh" event={"ID":"44ec722e-82ca-4adf-b196-c76a6d192768","Type":"ContainerDied","Data":"6b4711ed370ed0e1dc30137af9f9ca29cc9903c95f654f9237ac5085edce7b96"}
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.679516 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0961e7b7-53da-4f50-813c-048957ce0256-config-data-merged\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.680179 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-config-data\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.680232 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-amphora-certs\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.680270 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-combined-ca-bundle\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.680310 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-scripts\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.680367 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0961e7b7-53da-4f50-813c-048957ce0256-hm-ports\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782071 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0961e7b7-53da-4f50-813c-048957ce0256-config-data-merged\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782189 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-config-data\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782213 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-amphora-certs\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782239 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-combined-ca-bundle\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782268 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-scripts\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.782307 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0961e7b7-53da-4f50-813c-048957ce0256-hm-ports\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.783557 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0961e7b7-53da-4f50-813c-048957ce0256-hm-ports\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.783899 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0961e7b7-53da-4f50-813c-048957ce0256-config-data-merged\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.788276 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-combined-ca-bundle\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.788367 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-amphora-certs\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.789793 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-config-data\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.792493 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0961e7b7-53da-4f50-813c-048957ce0256-scripts\") pod \"octavia-worker-dbch8\" (UID: \"0961e7b7-53da-4f50-813c-048957ce0256\") " pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:39 crc kubenswrapper[4909]: I1128 17:49:39.818210 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-dbch8"
Nov 28 17:49:42 crc kubenswrapper[4909]: I1128 17:49:42.976340 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.044400 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged\") pod \"44ec722e-82ca-4adf-b196-c76a6d192768\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") "
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.044511 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data\") pod \"44ec722e-82ca-4adf-b196-c76a6d192768\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") "
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.044606 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle\") pod \"44ec722e-82ca-4adf-b196-c76a6d192768\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") "
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.044710 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts\") pod \"44ec722e-82ca-4adf-b196-c76a6d192768\" (UID: \"44ec722e-82ca-4adf-b196-c76a6d192768\") "
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.048299 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data" (OuterVolumeSpecName: "config-data") pod "44ec722e-82ca-4adf-b196-c76a6d192768" (UID: "44ec722e-82ca-4adf-b196-c76a6d192768"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.048396 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts" (OuterVolumeSpecName: "scripts") pod "44ec722e-82ca-4adf-b196-c76a6d192768" (UID: "44ec722e-82ca-4adf-b196-c76a6d192768"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.080867 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "44ec722e-82ca-4adf-b196-c76a6d192768" (UID: "44ec722e-82ca-4adf-b196-c76a6d192768"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.081439 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44ec722e-82ca-4adf-b196-c76a6d192768" (UID: "44ec722e-82ca-4adf-b196-c76a6d192768"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.147868 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.147903 4909 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/44ec722e-82ca-4adf-b196-c76a6d192768-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.147914 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.147923 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44ec722e-82ca-4adf-b196-c76a6d192768-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.696814 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-vz6hh" event={"ID":"44ec722e-82ca-4adf-b196-c76a6d192768","Type":"ContainerDied","Data":"3d366ecb76199a35b79f3dfd64ab8edce4ba08ea8ad0d91aa1ba606f449c00cd"} Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.697913 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d366ecb76199a35b79f3dfd64ab8edce4ba08ea8ad0d91aa1ba606f449c00cd" Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.697987 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.697987 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-vz6hh"
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.750757 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"]
Nov 28 17:49:43 crc kubenswrapper[4909]: W1128 17:49:43.811109 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7db8835b_2eb9_44d9_b7f2_62171c2b314c.slice/crio-deb0413e5d322e6c4596510b57d91cc133f2c69177d9cc8f70e9eff7db21c4e7 WatchSource:0}: Error finding container deb0413e5d322e6c4596510b57d91cc133f2c69177d9cc8f70e9eff7db21c4e7: Status 404 returned error can't find the container with id deb0413e5d322e6c4596510b57d91cc133f2c69177d9cc8f70e9eff7db21c4e7
Nov 28 17:49:43 crc kubenswrapper[4909]: I1128 17:49:43.979460 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-dbch8"]
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.710093 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7r7s9" event={"ID":"ccb4dffe-4790-45bb-ba07-e38a24c64022","Type":"ContainerStarted","Data":"6f95b16e018b42f5b0cc05a2287e55b633d7ffbc6bc613306ece9d7cfe321e71"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.710996 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-7r7s9"
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.711804 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-dbch8" event={"ID":"0961e7b7-53da-4f50-813c-048957ce0256","Type":"ContainerStarted","Data":"8dea873aee0472312a5963c02edcf97c8676a95dea8d15d9cc404c0ebcdc27b6"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.713725 4909 generic.go:334] "Generic (PLEG): container finished" podID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerID="9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc" exitCode=0
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.713790 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerDied","Data":"9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.713810 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerStarted","Data":"deb0413e5d322e6c4596510b57d91cc133f2c69177d9cc8f70e9eff7db21c4e7"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.717565 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-srrcl" event={"ID":"7da68717-3e7e-4652-be23-4662a4fcf3e8","Type":"ContainerStarted","Data":"5f1a0ba7ef94adb9a14c2b63b2767abf378ddca0008b7639977735c8485df84b"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.718307 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-srrcl"
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.722580 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerStarted","Data":"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.731792 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-v8g4q" event={"ID":"443cff50-0670-48ec-9602-a49de3990b65","Type":"ContainerStarted","Data":"157a6440d7768d070c2377eaf030b38e356cbde1b427c332dda60d54a140ca2b"}
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.740373 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-7r7s9" podStartSLOduration=3.770710393 podStartE2EDuration="16.740358274s" podCreationTimestamp="2025-11-28 17:49:28 +0000 UTC" firstStartedPulling="2025-11-28 17:49:30.026079594 +0000 UTC m=+5952.422764118" lastFinishedPulling="2025-11-28 17:49:42.995727475 +0000 UTC m=+5965.392411999" observedRunningTime="2025-11-28 17:49:44.729581145 +0000 UTC m=+5967.126265669" watchObservedRunningTime="2025-11-28 17:49:44.740358274 +0000 UTC m=+5967.137042798"
Nov 28 17:49:44 crc kubenswrapper[4909]: I1128 17:49:44.817546 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-srrcl" podStartSLOduration=10.817524855 podStartE2EDuration="10.817524855s" podCreationTimestamp="2025-11-28 17:49:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:49:44.799528722 +0000 UTC m=+5967.196213256" watchObservedRunningTime="2025-11-28 17:49:44.817524855 +0000 UTC m=+5967.214209379"
Nov 28 17:49:45 crc kubenswrapper[4909]: I1128 17:49:45.745045 4909 generic.go:334] "Generic (PLEG): container finished" podID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerID="492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b" exitCode=0
Nov 28 17:49:45 crc kubenswrapper[4909]: I1128 17:49:45.745182 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerDied","Data":"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b"}
Nov 28 17:49:46 crc kubenswrapper[4909]: I1128 17:49:46.755101 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerStarted","Data":"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46"}
Nov 28 17:49:47 crc kubenswrapper[4909]: I1128 17:49:47.767902 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-dbch8" event={"ID":"0961e7b7-53da-4f50-813c-048957ce0256","Type":"ContainerStarted","Data":"2c87d94c6bca3800cfb41c14eab305125d3b647a28377b0e9287a5feafa6a174"}
Nov 28 17:49:48 crc kubenswrapper[4909]: I1128 17:49:48.782877 4909 generic.go:334] "Generic (PLEG): container finished" podID="443cff50-0670-48ec-9602-a49de3990b65" containerID="157a6440d7768d070c2377eaf030b38e356cbde1b427c332dda60d54a140ca2b" exitCode=0
Nov 28 17:49:48 crc kubenswrapper[4909]: I1128 17:49:48.782955 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-v8g4q" event={"ID":"443cff50-0670-48ec-9602-a49de3990b65","Type":"ContainerDied","Data":"157a6440d7768d070c2377eaf030b38e356cbde1b427c332dda60d54a140ca2b"}
Nov 28 17:49:49 crc kubenswrapper[4909]: I1128 17:49:49.651790 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-srrcl"
containerID="004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46" exitCode=0 Nov 28 17:49:49 crc kubenswrapper[4909]: I1128 17:49:49.795260 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerDied","Data":"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46"} Nov 28 17:49:49 crc kubenswrapper[4909]: I1128 17:49:49.799144 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-v8g4q" event={"ID":"443cff50-0670-48ec-9602-a49de3990b65","Type":"ContainerStarted","Data":"aab9f5401c190a5ffcce9a6f677bf91281690289929599ac4409885d3a7df81b"} Nov 28 17:49:49 crc kubenswrapper[4909]: I1128 17:49:49.910825 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:49:49 crc kubenswrapper[4909]: I1128 17:49:49.910917 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.823384 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerStarted","Data":"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08"} Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.832698 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerStarted","Data":"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8"} Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.836130 4909 generic.go:334] "Generic (PLEG): container finished" podID="0961e7b7-53da-4f50-813c-048957ce0256" containerID="2c87d94c6bca3800cfb41c14eab305125d3b647a28377b0e9287a5feafa6a174" exitCode=0 Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.837426 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-dbch8" event={"ID":"0961e7b7-53da-4f50-813c-048957ce0256","Type":"ContainerDied","Data":"2c87d94c6bca3800cfb41c14eab305125d3b647a28377b0e9287a5feafa6a174"} Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.838935 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.880707 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hjphz" podStartSLOduration=7.19325052 podStartE2EDuration="12.880681443s" podCreationTimestamp="2025-11-28 17:49:38 +0000 UTC" firstStartedPulling="2025-11-28 17:49:44.715397704 +0000 UTC m=+5967.112082268" lastFinishedPulling="2025-11-28 17:49:50.402828667 +0000 UTC m=+5972.799513191" observedRunningTime="2025-11-28 17:49:50.848849279 +0000 UTC m=+5973.245533843" watchObservedRunningTime="2025-11-28 17:49:50.880681443 +0000 UTC m=+5973.277365967" Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.891365 4909 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" podStartSLOduration=2.133611513 podStartE2EDuration="21.89134692s" podCreationTimestamp="2025-11-28 17:49:29 +0000 UTC" firstStartedPulling="2025-11-28 17:49:30.542117036 +0000 UTC m=+5952.938801560" lastFinishedPulling="2025-11-28 17:49:50.299852443 +0000 UTC m=+5972.696536967" observedRunningTime="2025-11-28 17:49:50.8690236 +0000 UTC m=+5973.265708134" watchObservedRunningTime="2025-11-28 17:49:50.89134692 +0000 UTC m=+5973.288031444" Nov 28 17:49:50 crc kubenswrapper[4909]: I1128 17:49:50.916533 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-v8g4q" podStartSLOduration=8.408199688 podStartE2EDuration="14.916504815s" podCreationTimestamp="2025-11-28 17:49:36 +0000 UTC" firstStartedPulling="2025-11-28 17:49:37.444003306 +0000 UTC m=+5959.840687830" lastFinishedPulling="2025-11-28 17:49:43.952308433 +0000 UTC m=+5966.348992957" observedRunningTime="2025-11-28 17:49:50.909591619 +0000 UTC m=+5973.306276143" watchObservedRunningTime="2025-11-28 17:49:50.916504815 +0000 UTC m=+5973.313189339" Nov 28 17:49:51 crc kubenswrapper[4909]: I1128 17:49:51.855706 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-dbch8" event={"ID":"0961e7b7-53da-4f50-813c-048957ce0256","Type":"ContainerStarted","Data":"b19fa346d1cb6251c96181a4536068e9898380dbf8d7660359aca657a5350e5b"} Nov 28 17:49:51 crc kubenswrapper[4909]: I1128 17:49:51.856722 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-dbch8" Nov 28 17:49:51 crc kubenswrapper[4909]: I1128 17:49:51.894897 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-dbch8" podStartSLOduration=10.491676049 podStartE2EDuration="12.894865575s" podCreationTimestamp="2025-11-28 17:49:39 +0000 UTC" firstStartedPulling="2025-11-28 17:49:44.032153235 +0000 UTC m=+5966.428837759" lastFinishedPulling="2025-11-28 17:49:46.435342761 +0000 UTC m=+5968.832027285" observedRunningTime="2025-11-28 17:49:51.888935976 +0000 UTC m=+5974.285620530" watchObservedRunningTime="2025-11-28 17:49:51.894865575 +0000 UTC m=+5974.291550139" Nov 28 17:49:59 crc kubenswrapper[4909]: I1128 17:49:59.157777 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:49:59 crc kubenswrapper[4909]: I1128 17:49:59.158234 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:49:59 crc kubenswrapper[4909]: I1128 17:49:59.361519 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-7r7s9" Nov 28 17:50:00 crc kubenswrapper[4909]: I1128 17:50:00.222824 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hjphz" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="registry-server" probeResult="failure" output=< Nov 28 17:50:00 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 17:50:00 crc kubenswrapper[4909]: > Nov 28 17:50:06 crc kubenswrapper[4909]: I1128 17:50:06.752506 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-v8g4q" Nov 28 17:50:09 crc kubenswrapper[4909]: I1128 17:50:09.205200 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
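Two different probe mechanisms fail above: machine-config-daemon's HTTP liveness probe against 127.0.0.1:8798/health, and the registry-server startup probe that cannot reach gRPC port :50051 within 1s. The pod manifests are not part of this log, so the sketch below only illustrates what such probe specs typically look like in client-go types; the host, path, port, and the grpc_health_probe command are inferred or assumed, not confirmed.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// HTTP liveness probe consistent with:
	//   Get "http://127.0.0.1:8798/health": connect: connection refused
	liveness := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1", // inferred from the probe URL in the log
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
	}

	// Startup probe consistent with:
	//   timeout: failed to connect service ":50051" within 1s
	startup := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				// assumption: catalog pods commonly probe via grpc_health_probe
				Command: []string{"grpc_health_probe", "-addr=:50051"},
			},
		},
		TimeoutSeconds: 1, // matches the "within 1s" in the failure output
	}

	fmt.Println(liveness.HTTPGet.Path, startup.Exec.Command)
}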
status="started" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:50:09 crc kubenswrapper[4909]: I1128 17:50:09.253944 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:50:09 crc kubenswrapper[4909]: I1128 17:50:09.854678 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-dbch8" Nov 28 17:50:10 crc kubenswrapper[4909]: I1128 17:50:10.033501 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"] Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.072324 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hjphz" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="registry-server" containerID="cri-o://808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08" gracePeriod=2 Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.588486 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.719040 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content\") pod \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.719145 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgq7r\" (UniqueName: \"kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r\") pod \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.719295 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities\") pod \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\" (UID: \"7db8835b-2eb9-44d9-b7f2-62171c2b314c\") " Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.721782 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities" (OuterVolumeSpecName: "utilities") pod "7db8835b-2eb9-44d9-b7f2-62171c2b314c" (UID: "7db8835b-2eb9-44d9-b7f2-62171c2b314c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.727885 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r" (OuterVolumeSpecName: "kube-api-access-bgq7r") pod "7db8835b-2eb9-44d9-b7f2-62171c2b314c" (UID: "7db8835b-2eb9-44d9-b7f2-62171c2b314c"). InnerVolumeSpecName "kube-api-access-bgq7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.822039 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgq7r\" (UniqueName: \"kubernetes.io/projected/7db8835b-2eb9-44d9-b7f2-62171c2b314c-kube-api-access-bgq7r\") on node \"crc\" DevicePath \"\"" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.822088 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.864160 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7db8835b-2eb9-44d9-b7f2-62171c2b314c" (UID: "7db8835b-2eb9-44d9-b7f2-62171c2b314c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:50:11 crc kubenswrapper[4909]: I1128 17:50:11.924185 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7db8835b-2eb9-44d9-b7f2-62171c2b314c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.086042 4909 generic.go:334] "Generic (PLEG): container finished" podID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerID="808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08" exitCode=0 Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.086293 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerDied","Data":"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08"} Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.086799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjphz" event={"ID":"7db8835b-2eb9-44d9-b7f2-62171c2b314c","Type":"ContainerDied","Data":"deb0413e5d322e6c4596510b57d91cc133f2c69177d9cc8f70e9eff7db21c4e7"} Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.086821 4909 scope.go:117] "RemoveContainer" containerID="808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.086362 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hjphz" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.116607 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"] Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.139378 4909 scope.go:117] "RemoveContainer" containerID="004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.146444 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hjphz"] Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.183368 4909 scope.go:117] "RemoveContainer" containerID="9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.216260 4909 scope.go:117] "RemoveContainer" containerID="808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08" Nov 28 17:50:12 crc kubenswrapper[4909]: E1128 17:50:12.216740 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08\": container with ID starting with 808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08 not found: ID does not exist" containerID="808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.216787 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08"} err="failed to get container status \"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08\": rpc error: code = NotFound desc = could not find container \"808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08\": container with ID starting with 808967b381bededdd9056ffe46d83b98cb37113619a358e9e18dc8ea5584db08 not found: ID does not exist" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.216812 4909 scope.go:117] "RemoveContainer" containerID="004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46" Nov 28 17:50:12 crc kubenswrapper[4909]: E1128 17:50:12.217061 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46\": container with ID starting with 004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46 not found: ID does not exist" containerID="004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.217090 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46"} err="failed to get container status \"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46\": rpc error: code = NotFound desc = could not find container \"004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46\": container with ID starting with 004130b3487e573e8873b847a58e61897656a265393502a791c6e4717707dc46 not found: ID does not exist" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.217108 4909 scope.go:117] "RemoveContainer" containerID="9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc" Nov 28 17:50:12 crc kubenswrapper[4909]: E1128 17:50:12.217346 4909 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc\": container with ID starting with 9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc not found: ID does not exist" containerID="9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc" Nov 28 17:50:12 crc kubenswrapper[4909]: I1128 17:50:12.217418 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc"} err="failed to get container status \"9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc\": rpc error: code = NotFound desc = could not find container \"9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc\": container with ID starting with 9f1f038cd162ff4d335db50a6b751a9c0e24c76492c718c4aed37a2e2750c7cc not found: ID does not exist" Nov 28 17:50:13 crc kubenswrapper[4909]: I1128 17:50:13.399775 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"] Nov 28 17:50:13 crc kubenswrapper[4909]: I1128 17:50:13.400248 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerName="octavia-amphora-httpd" containerID="cri-o://beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8" gracePeriod=30 Nov 28 17:50:13 crc kubenswrapper[4909]: I1128 17:50:13.917447 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" path="/var/lib/kubelet/pods/7db8835b-2eb9-44d9-b7f2-62171c2b314c/volumes" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.006775 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.077388 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image\") pod \"a236d9b4-0661-4561-aabd-4c45d150c2ce\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.077457 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config\") pod \"a236d9b4-0661-4561-aabd-4c45d150c2ce\" (UID: \"a236d9b4-0661-4561-aabd-4c45d150c2ce\") " Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.110167 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a236d9b4-0661-4561-aabd-4c45d150c2ce" (UID: "a236d9b4-0661-4561-aabd-4c45d150c2ce"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.115048 4909 generic.go:334] "Generic (PLEG): container finished" podID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerID="beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8" exitCode=0 Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.115129 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerDied","Data":"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8"} Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.115203 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" event={"ID":"a236d9b4-0661-4561-aabd-4c45d150c2ce","Type":"ContainerDied","Data":"1b11253488a4e40788825bc566de8f104b1e234b4a764cfeedbbcccb88fd63e9"} Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.115225 4909 scope.go:117] "RemoveContainer" containerID="beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.115400 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xjdgz" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.164489 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "a236d9b4-0661-4561-aabd-4c45d150c2ce" (UID: "a236d9b4-0661-4561-aabd-4c45d150c2ce"). InnerVolumeSpecName "amphora-image". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.180377 4909 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/a236d9b4-0661-4561-aabd-4c45d150c2ce-amphora-image\") on node \"crc\" DevicePath \"\"" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.180429 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a236d9b4-0661-4561-aabd-4c45d150c2ce-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.229534 4909 scope.go:117] "RemoveContainer" containerID="492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.257340 4909 scope.go:117] "RemoveContainer" containerID="beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8" Nov 28 17:50:14 crc kubenswrapper[4909]: E1128 17:50:14.257992 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8\": container with ID starting with beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8 not found: ID does not exist" containerID="beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.258032 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8"} err="failed to get container status \"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8\": rpc error: code = NotFound desc = could not find container 
\"beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8\": container with ID starting with beec615b3e17b1d91bffb4ee24558b90cf69698d1de77cbca37498f8916661f8 not found: ID does not exist" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.258058 4909 scope.go:117] "RemoveContainer" containerID="492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b" Nov 28 17:50:14 crc kubenswrapper[4909]: E1128 17:50:14.258762 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b\": container with ID starting with 492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b not found: ID does not exist" containerID="492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.258820 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b"} err="failed to get container status \"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b\": rpc error: code = NotFound desc = could not find container \"492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b\": container with ID starting with 492af500c4559d2abedb0c4b688710114fe91fdadf37e26712352f4a91636f0b not found: ID does not exist" Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.465308 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"] Nov 28 17:50:14 crc kubenswrapper[4909]: I1128 17:50:14.479550 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xjdgz"] Nov 28 17:50:15 crc kubenswrapper[4909]: I1128 17:50:15.916491 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" path="/var/lib/kubelet/pods/a236d9b4-0661-4561-aabd-4c45d150c2ce/volumes" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655078 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-fqbq9"] Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655592 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="registry-server" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655612 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="registry-server" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655628 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" containerName="octavia-db-sync" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655635 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" containerName="octavia-db-sync" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655646 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" containerName="init" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655667 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" containerName="init" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655700 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" 
containerName="octavia-amphora-httpd" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655709 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerName="octavia-amphora-httpd" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655718 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="extract-utilities" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655724 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="extract-utilities" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655737 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerName="init" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655743 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerName="init" Nov 28 17:50:16 crc kubenswrapper[4909]: E1128 17:50:16.655757 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="extract-content" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.655763 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="extract-content" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.656023 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a236d9b4-0661-4561-aabd-4c45d150c2ce" containerName="octavia-amphora-httpd" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.656040 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7db8835b-2eb9-44d9-b7f2-62171c2b314c" containerName="registry-server" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.656049 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" containerName="octavia-db-sync" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.657167 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.662168 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.672302 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-fqbq9"] Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.837665 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/44d76067-4398-4f0c-b08e-ace6ec764257-amphora-image\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.838034 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/44d76067-4398-4f0c-b08e-ace6ec764257-httpd-config\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.940265 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/44d76067-4398-4f0c-b08e-ace6ec764257-amphora-image\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.940313 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/44d76067-4398-4f0c-b08e-ace6ec764257-httpd-config\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.940795 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/44d76067-4398-4f0c-b08e-ace6ec764257-amphora-image\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.947434 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/44d76067-4398-4f0c-b08e-ace6ec764257-httpd-config\") pod \"octavia-image-upload-59f8cff499-fqbq9\" (UID: \"44d76067-4398-4f0c-b08e-ace6ec764257\") " pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:16 crc kubenswrapper[4909]: I1128 17:50:16.990071 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" Nov 28 17:50:17 crc kubenswrapper[4909]: W1128 17:50:17.519578 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44d76067_4398_4f0c_b08e_ace6ec764257.slice/crio-3d2206f0ad6cb986cc68c8e69dfe2971337ed8764a95d0d5ec5f9852e99b55f4 WatchSource:0}: Error finding container 3d2206f0ad6cb986cc68c8e69dfe2971337ed8764a95d0d5ec5f9852e99b55f4: Status 404 returned error can't find the container with id 3d2206f0ad6cb986cc68c8e69dfe2971337ed8764a95d0d5ec5f9852e99b55f4 Nov 28 17:50:17 crc kubenswrapper[4909]: I1128 17:50:17.525889 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-fqbq9"] Nov 28 17:50:18 crc kubenswrapper[4909]: I1128 17:50:18.167611 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" event={"ID":"44d76067-4398-4f0c-b08e-ace6ec764257","Type":"ContainerStarted","Data":"3d2206f0ad6cb986cc68c8e69dfe2971337ed8764a95d0d5ec5f9852e99b55f4"} Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.181444 4909 generic.go:334] "Generic (PLEG): container finished" podID="44d76067-4398-4f0c-b08e-ace6ec764257" containerID="254e011b6046fd77f0d35f620c93c4565ce62c6614feb5ee60fd354f93e14eef" exitCode=0 Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.181503 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" event={"ID":"44d76067-4398-4f0c-b08e-ace6ec764257","Type":"ContainerDied","Data":"254e011b6046fd77f0d35f620c93c4565ce62c6614feb5ee60fd354f93e14eef"} Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.910731 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.911170 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.913445 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.914242 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:50:19 crc kubenswrapper[4909]: I1128 17:50:19.914318 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" gracePeriod=600 Nov 28 17:50:20 crc kubenswrapper[4909]: E1128 17:50:20.054972 4909 
Nov 28 17:50:20 crc kubenswrapper[4909]: E1128 17:50:20.054972 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:50:20 crc kubenswrapper[4909]: I1128 17:50:20.196262 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" exitCode=0
Nov 28 17:50:20 crc kubenswrapper[4909]: I1128 17:50:20.196424 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"}
Nov 28 17:50:20 crc kubenswrapper[4909]: I1128 17:50:20.196611 4909 scope.go:117] "RemoveContainer" containerID="a9c5475473dd9ba62a24558f25eeaca06c83d54c4c834c5612f2192cce1e1a09"
Nov 28 17:50:20 crc kubenswrapper[4909]: I1128 17:50:20.197321 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:50:20 crc kubenswrapper[4909]: E1128 17:50:20.197558 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:50:23 crc kubenswrapper[4909]: I1128 17:50:23.228084 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" event={"ID":"44d76067-4398-4f0c-b08e-ace6ec764257","Type":"ContainerStarted","Data":"3382d4164e488c6e5a6d3b14c557907840fbfba8a09842637aedf9c4d681c783"}
Nov 28 17:50:23 crc kubenswrapper[4909]: I1128 17:50:23.264816 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-fqbq9" podStartSLOduration=2.7971484049999997 podStartE2EDuration="7.264750454s" podCreationTimestamp="2025-11-28 17:50:16 +0000 UTC" firstStartedPulling="2025-11-28 17:50:17.522957563 +0000 UTC m=+5999.919642087" lastFinishedPulling="2025-11-28 17:50:21.990559602 +0000 UTC m=+6004.387244136" observedRunningTime="2025-11-28 17:50:23.249068384 +0000 UTC m=+6005.645752898" watchObservedRunningTime="2025-11-28 17:50:23.264750454 +0000 UTC m=+6005.661434998"
Nov 28 17:50:30 crc kubenswrapper[4909]: I1128 17:50:30.902282 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:50:30 crc kubenswrapper[4909]: E1128 17:50:30.903540 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:50:42 crc kubenswrapper[4909]: I1128 17:50:42.901798 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:50:42 crc kubenswrapper[4909]: E1128 17:50:42.904867 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:50:54 crc kubenswrapper[4909]: I1128 17:50:54.902522 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:50:54 crc kubenswrapper[4909]: E1128 17:50:54.903494 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.814023 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"]
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.822921 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.831036 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.831214 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.831262 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.831226 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-gcfh7"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.855829 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"]
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.865072 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.865210 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt"
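The CrashLoopBackOff errors repeating above (17:50:20, 17:50:30, 17:50:42, 17:50:54) are the sync loop re-evaluating the pod while restarts stay gated by the kubelet's container backoff, which to my knowledge starts at 10s and doubles per failure up to the 5m cap quoted in the error text. A quick sketch of that ladder:

package main

import (
	"fmt"
	"time"
)

func main() {
	// kubelet restart backoff: 10s initial, doubling, capped at 5m
	// (the "back-off 5m0s" in the CrashLoopBackOff errors above).
	backoff, maxBackoff := 10*time.Second, 5*time.Minute
	for i := 1; backoff < maxBackoff; i++ {
		fmt.Printf("restart %d gated by %v\n", i, backoff)
		backoff *= 2
	}
	fmt.Println("further restarts gated by", maxBackoff)
}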
\"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.865369 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x55xl\" (UniqueName: \"kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.865449 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.899557 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.900649 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-log" containerID="cri-o://28ecd248bb264123a78903ef56997b7630ddb7524a3f36eb3f76971ca78e3127" gracePeriod=30 Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.901136 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-httpd" containerID="cri-o://322e0616f54341d91165dfa4c57a0977fe42661c774e7cc09684d3743a0e28cb" gracePeriod=30 Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.936775 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"] Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.938645 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.987794 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.987855 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.987917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.987958 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c9f91c-f9ee-4be9-b5c1-993d20615405-logs\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.988024 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x55xl\" (UniqueName: \"kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.990050 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.990345 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c9f91c-f9ee-4be9-b5c1-993d20615405-logs\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:57 crc kubenswrapper[4909]: I1128 17:50:57.990839 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.027872 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.035570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-x55xl\" (UniqueName: \"kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl\") pod \"horizon-5d66dd75d5-cvdgt\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.039903 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.090035 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.090112 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.090162 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2k9j\" (UniqueName: \"kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.090193 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.090229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.094748 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.094976 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-log" containerID="cri-o://1aa6258fe04ef4d341553ed1ab2bebe7218d2d91ca983ecc85033b22b76f9b05" gracePeriod=30 Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.095160 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-httpd" containerID="cri-o://db86491c36d28334f4760b1a2499df8201d87e9374ffb080e7f82a02d4233608" gracePeriod=30 Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.163855 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.196960 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.197298 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2k9j\" (UniqueName: \"kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.197343 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.197378 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.197484 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.197903 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.198602 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.198940 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.204160 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.229093 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-g2k9j\" (UniqueName: \"kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j\") pod \"horizon-59965f94f7-jr6d6\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") " pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.382378 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59965f94f7-jr6d6" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.588582 4909 generic.go:334] "Generic (PLEG): container finished" podID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerID="1aa6258fe04ef4d341553ed1ab2bebe7218d2d91ca983ecc85033b22b76f9b05" exitCode=143 Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.588613 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerDied","Data":"1aa6258fe04ef4d341553ed1ab2bebe7218d2d91ca983ecc85033b22b76f9b05"} Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.593210 4909 generic.go:334] "Generic (PLEG): container finished" podID="5e86471a-d918-4d86-8696-842dc2205cd3" containerID="28ecd248bb264123a78903ef56997b7630ddb7524a3f36eb3f76971ca78e3127" exitCode=143 Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.593252 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerDied","Data":"28ecd248bb264123a78903ef56997b7630ddb7524a3f36eb3f76971ca78e3127"} Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.662559 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.671075 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.704112 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.706812 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.721509 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"] Nov 28 17:50:58 crc kubenswrapper[4909]: W1128 17:50:58.818949 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2714cbdd_52c8_4173_8ba6_f3fb9ea361a3.slice/crio-13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70 WatchSource:0}: Error finding container 13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70: Status 404 returned error can't find the container with id 13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70 Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.823605 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"] Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.912059 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.912119 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.912147 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.912163 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rspdm\" (UniqueName: \"kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:58 crc kubenswrapper[4909]: I1128 17:50:58.912321 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.014602 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.014765 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs\") pod 
\"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.014819 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.014850 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.014877 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rspdm\" (UniqueName: \"kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.015729 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.016420 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.016785 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.022794 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.033066 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rspdm\" (UniqueName: \"kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm\") pod \"horizon-7f7584cb65-cc7wv\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.325130 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.601790 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerStarted","Data":"13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70"} Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.602993 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerStarted","Data":"1fb84037ddafa1357af824415598953d9f48ae8ba7cfcbd1fdce9fb8b058db30"} Nov 28 17:50:59 crc kubenswrapper[4909]: I1128 17:50:59.789563 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"] Nov 28 17:51:00 crc kubenswrapper[4909]: I1128 17:51:00.612636 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerStarted","Data":"b5b330c35dcfa83a50e1ef16e5a9a0dad3c2428b0f8bf73d4270ccdb5dc1cd0f"} Nov 28 17:51:01 crc kubenswrapper[4909]: I1128 17:51:01.628696 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerDied","Data":"db86491c36d28334f4760b1a2499df8201d87e9374ffb080e7f82a02d4233608"} Nov 28 17:51:01 crc kubenswrapper[4909]: I1128 17:51:01.628549 4909 generic.go:334] "Generic (PLEG): container finished" podID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerID="db86491c36d28334f4760b1a2499df8201d87e9374ffb080e7f82a02d4233608" exitCode=0 Nov 28 17:51:01 crc kubenswrapper[4909]: I1128 17:51:01.637079 4909 generic.go:334] "Generic (PLEG): container finished" podID="5e86471a-d918-4d86-8696-842dc2205cd3" containerID="322e0616f54341d91165dfa4c57a0977fe42661c774e7cc09684d3743a0e28cb" exitCode=0 Nov 28 17:51:01 crc kubenswrapper[4909]: I1128 17:51:01.637126 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerDied","Data":"322e0616f54341d91165dfa4c57a0977fe42661c774e7cc09684d3743a0e28cb"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.449712 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.571408 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582478 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582688 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582730 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582771 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582836 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582938 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc7x6\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.582994 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph\") pod \"5e86471a-d918-4d86-8696-842dc2205cd3\" (UID: \"5e86471a-d918-4d86-8696-842dc2205cd3\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.584148 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.584727 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs" (OuterVolumeSpecName: "logs") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.602883 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts" (OuterVolumeSpecName: "scripts") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.604840 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph" (OuterVolumeSpecName: "ceph") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.608009 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6" (OuterVolumeSpecName: "kube-api-access-dc7x6") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "kube-api-access-dc7x6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685078 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685133 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685297 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685438 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685516 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjgjj\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685554 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.685588 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run\") pod \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\" (UID: \"7aa429fe-eb79-40cb-9612-931f5b0e2b54\") " Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.686304 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.686320 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.686337 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc7x6\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-kube-api-access-dc7x6\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.686347 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e86471a-d918-4d86-8696-842dc2205cd3-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.686358 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e86471a-d918-4d86-8696-842dc2205cd3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.687033 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs" (OuterVolumeSpecName: "logs") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.688744 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.696433 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7aa429fe-eb79-40cb-9612-931f5b0e2b54","Type":"ContainerDied","Data":"07e6dd46d956155726250cfb303e4a6a9ab5911d74ec23ac82ec22601f9a1e72"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.696490 4909 scope.go:117] "RemoveContainer" containerID="db86491c36d28334f4760b1a2499df8201d87e9374ffb080e7f82a02d4233608" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.696624 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.697288 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph" (OuterVolumeSpecName: "ceph") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.700542 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerStarted","Data":"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.704486 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj" (OuterVolumeSpecName: "kube-api-access-kjgjj") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "kube-api-access-kjgjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.707870 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts" (OuterVolumeSpecName: "scripts") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.708670 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e86471a-d918-4d86-8696-842dc2205cd3","Type":"ContainerDied","Data":"6c3257a3dff06fa5f7d28458b1b354e38b9a2673f9944cf66769580abd7348ee"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.708775 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.710748 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerStarted","Data":"743bb0300263e9d29f266a3237e27da8aced6a0274a448c4715d0a1b07cb2ab3"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.710785 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerStarted","Data":"2c44ca4620701caa981707c19b746fec250e8822a377b1e08e67cb2d4487b9f0"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.710886 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59965f94f7-jr6d6" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon-log" containerID="cri-o://2c44ca4620701caa981707c19b746fec250e8822a377b1e08e67cb2d4487b9f0" gracePeriod=30 Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.711122 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59965f94f7-jr6d6" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon" containerID="cri-o://743bb0300263e9d29f266a3237e27da8aced6a0274a448c4715d0a1b07cb2ab3" gracePeriod=30 Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.712429 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerStarted","Data":"27f064985149f47e89c604e0f17177632125c0fb8e4f5a17e1c819c685006698"} Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.725717 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/horizon-7f7584cb65-cc7wv" podStartSLOduration=2.487427293 podStartE2EDuration="8.725699113s" podCreationTimestamp="2025-11-28 17:50:58 +0000 UTC" firstStartedPulling="2025-11-28 17:50:59.815646433 +0000 UTC m=+6042.212330957" lastFinishedPulling="2025-11-28 17:51:06.053918253 +0000 UTC m=+6048.450602777" observedRunningTime="2025-11-28 17:51:06.718219935 +0000 UTC m=+6049.114904459" watchObservedRunningTime="2025-11-28 17:51:06.725699113 +0000 UTC m=+6049.122383637" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.745221 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-59965f94f7-jr6d6" podStartSLOduration=2.4907013129999998 podStartE2EDuration="9.745204592s" podCreationTimestamp="2025-11-28 17:50:57 +0000 UTC" firstStartedPulling="2025-11-28 17:50:58.822518329 +0000 UTC m=+6041.219202853" lastFinishedPulling="2025-11-28 17:51:06.077021608 +0000 UTC m=+6048.473706132" observedRunningTime="2025-11-28 17:51:06.739934362 +0000 UTC m=+6049.136618886" watchObservedRunningTime="2025-11-28 17:51:06.745204592 +0000 UTC m=+6049.141889116" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.750406 4909 scope.go:117] "RemoveContainer" containerID="1aa6258fe04ef4d341553ed1ab2bebe7218d2d91ca983ecc85033b22b76f9b05" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.761313 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.770281 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.771254 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data" (OuterVolumeSpecName: "config-data") pod "5e86471a-d918-4d86-8696-842dc2205cd3" (UID: "5e86471a-d918-4d86-8696-842dc2205cd3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.771842 4909 scope.go:117] "RemoveContainer" containerID="322e0616f54341d91165dfa4c57a0977fe42661c774e7cc09684d3743a0e28cb" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787897 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787928 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjgjj\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-kube-api-access-kjgjj\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787941 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7aa429fe-eb79-40cb-9612-931f5b0e2b54-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787950 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787959 4909 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787969 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e86471a-d918-4d86-8696-842dc2205cd3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787977 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7aa429fe-eb79-40cb-9612-931f5b0e2b54-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.787985 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.796870 4909 scope.go:117] "RemoveContainer" containerID="28ecd248bb264123a78903ef56997b7630ddb7524a3f36eb3f76971ca78e3127" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.815912 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data" (OuterVolumeSpecName: "config-data") pod "7aa429fe-eb79-40cb-9612-931f5b0e2b54" (UID: "7aa429fe-eb79-40cb-9612-931f5b0e2b54"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:51:06 crc kubenswrapper[4909]: I1128 17:51:06.890057 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7aa429fe-eb79-40cb-9612-931f5b0e2b54-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.033193 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.045635 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.054209 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.068812 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.075867 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: E1128 17:51:07.076272 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076287 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: E1128 17:51:07.076301 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076307 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: E1128 17:51:07.076325 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076331 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: E1128 17:51:07.076350 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076355 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076578 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076613 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076630 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-httpd" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.076648 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" containerName="glance-log" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.079001 4909 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.083162 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.083416 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.083569 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-lcbsj" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.098491 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.127086 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.127129 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.127236 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.133094 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198535 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-logs\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198624 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198725 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-scripts\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198787 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198813 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-config-data\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.198840 4909 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-ceph\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.199014 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxbmj\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-kube-api-access-nxbmj\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.300795 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.300849 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-config-data\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.300882 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-config-data\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.300904 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-ceph\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.300937 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-ceph\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301038 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301114 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301165 4909 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhs75\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-kube-api-access-bhs75\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301276 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-logs\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301332 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxbmj\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-kube-api-access-nxbmj\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301400 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-logs\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301422 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301459 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301484 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-scripts\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.301541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-scripts\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.302075 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da12d29e-36b6-4d5c-90aa-9311ea8dd628-logs\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.306910 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceph\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-ceph\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.308048 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.308383 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-scripts\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.308878 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da12d29e-36b6-4d5c-90aa-9311ea8dd628-config-data\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.322461 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxbmj\" (UniqueName: \"kubernetes.io/projected/da12d29e-36b6-4d5c-90aa-9311ea8dd628-kube-api-access-nxbmj\") pod \"glance-default-internal-api-0\" (UID: \"da12d29e-36b6-4d5c-90aa-9311ea8dd628\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.399725 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405141 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405230 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405259 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhs75\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-kube-api-access-bhs75\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405314 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-logs\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405393 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-scripts\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405469 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-config-data\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405494 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-ceph\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.405631 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.406106 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10420792-94b6-41b0-9e92-391c998c8b26-logs\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.409339 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-scripts\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.409990 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.410036 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-ceph\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.410501 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10420792-94b6-41b0-9e92-391c998c8b26-config-data\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.444351 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhs75\" (UniqueName: \"kubernetes.io/projected/10420792-94b6-41b0-9e92-391c998c8b26-kube-api-access-bhs75\") pod \"glance-default-external-api-0\" (UID: \"10420792-94b6-41b0-9e92-391c998c8b26\") " pod="openstack/glance-default-external-api-0" Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.455495 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.776950 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerStarted","Data":"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"}
Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.795711 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerStarted","Data":"ce849d0ea273f7addfc359c90cfb11d9de9743cf0e25c7272db7a5c88a93eff3"}
Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.837965 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5d66dd75d5-cvdgt" podStartSLOduration=3.479959116 podStartE2EDuration="10.837940786s" podCreationTimestamp="2025-11-28 17:50:57 +0000 UTC" firstStartedPulling="2025-11-28 17:50:58.681488759 +0000 UTC m=+6041.078173283" lastFinishedPulling="2025-11-28 17:51:06.039470429 +0000 UTC m=+6048.436154953" observedRunningTime="2025-11-28 17:51:07.825203197 +0000 UTC m=+6050.221887721" watchObservedRunningTime="2025-11-28 17:51:07.837940786 +0000 UTC m=+6050.234625320"
Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.914308 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e86471a-d918-4d86-8696-842dc2205cd3" path="/var/lib/kubelet/pods/5e86471a-d918-4d86-8696-842dc2205cd3/volumes"
Nov 28 17:51:07 crc kubenswrapper[4909]: I1128 17:51:07.915298 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7aa429fe-eb79-40cb-9612-931f5b0e2b54" path="/var/lib/kubelet/pods/7aa429fe-eb79-40cb-9612-931f5b0e2b54/volumes"
Nov 28 17:51:08 crc kubenswrapper[4909]: W1128 17:51:08.095290 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda12d29e_36b6_4d5c_90aa_9311ea8dd628.slice/crio-2d8608a0250a73cca3f3adbfc6eca5c2d34831ff7db28aa8e45ad570d2143926 WatchSource:0}: Error finding container 2d8608a0250a73cca3f3adbfc6eca5c2d34831ff7db28aa8e45ad570d2143926: Status 404 returned error can't find the container with id 2d8608a0250a73cca3f3adbfc6eca5c2d34831ff7db28aa8e45ad570d2143926
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.095588 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.165759 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.166146 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.166163 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:51:08 crc kubenswrapper[4909]: W1128 17:51:08.166829 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10420792_94b6_41b0_9e92_391c998c8b26.slice/crio-ad6cad6bbfc1e557614dcb2b3580c92fa68f4b6bf6e2adc72e641c1972fcde09 WatchSource:0}: Error finding container ad6cad6bbfc1e557614dcb2b3580c92fa68f4b6bf6e2adc72e641c1972fcde09: Status 404 returned error can't find the container with id ad6cad6bbfc1e557614dcb2b3580c92fa68f4b6bf6e2adc72e641c1972fcde09
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.383569 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-59965f94f7-jr6d6"
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.809285 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"da12d29e-36b6-4d5c-90aa-9311ea8dd628","Type":"ContainerStarted","Data":"2d8608a0250a73cca3f3adbfc6eca5c2d34831ff7db28aa8e45ad570d2143926"}
Nov 28 17:51:08 crc kubenswrapper[4909]: I1128 17:51:08.811335 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10420792-94b6-41b0-9e92-391c998c8b26","Type":"ContainerStarted","Data":"ad6cad6bbfc1e557614dcb2b3580c92fa68f4b6bf6e2adc72e641c1972fcde09"}
Nov 28 17:51:09 crc kubenswrapper[4909]: I1128 17:51:09.327441 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7f7584cb65-cc7wv"
Nov 28 17:51:09 crc kubenswrapper[4909]: I1128 17:51:09.327789 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f7584cb65-cc7wv"
Nov 28 17:51:09 crc kubenswrapper[4909]: I1128 17:51:09.825714 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10420792-94b6-41b0-9e92-391c998c8b26","Type":"ContainerStarted","Data":"49998f343f392d4d67e396a3d475bc7e3ce49a77a9e78e939337e2c3e1bae101"}
Nov 28 17:51:09 crc kubenswrapper[4909]: I1128 17:51:09.828250 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"da12d29e-36b6-4d5c-90aa-9311ea8dd628","Type":"ContainerStarted","Data":"44b669a3d204e49c61a0299280ba09429e13c6eb002501b29f898cea9ece764b"}
Nov 28 17:51:09 crc kubenswrapper[4909]: I1128 17:51:09.901446 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:51:09 crc kubenswrapper[4909]: E1128 17:51:09.901775 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:51:10 crc kubenswrapper[4909]: I1128 17:51:10.845411 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"da12d29e-36b6-4d5c-90aa-9311ea8dd628","Type":"ContainerStarted","Data":"2b95042a57ef1fa6ec40287289e7e24c214eb9edb2e49eb50dc3d9cbd87f6542"}
Nov 28 17:51:10 crc kubenswrapper[4909]: I1128 17:51:10.852732 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10420792-94b6-41b0-9e92-391c998c8b26","Type":"ContainerStarted","Data":"d9742da9db53eaca188f178aa985290a8c7f46171ba0b1bb75cdc0091d9287cb"}
Nov 28 17:51:10 crc kubenswrapper[4909]: I1128 17:51:10.886577 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.88655295 podStartE2EDuration="3.88655295s" podCreationTimestamp="2025-11-28 17:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:51:10.873418691 +0000 UTC m=+6053.270103225" watchObservedRunningTime="2025-11-28 17:51:10.88655295 +0000 UTC m=+6053.283237494"
Nov 28 17:51:10 crc kubenswrapper[4909]: I1128 17:51:10.926889 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.926867252 podStartE2EDuration="3.926867252s" podCreationTimestamp="2025-11-28 17:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:51:10.907557679 +0000 UTC m=+6053.304242213" watchObservedRunningTime="2025-11-28 17:51:10.926867252 +0000 UTC m=+6053.323551786"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.400867 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.401444 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.452593 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.457353 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.457421 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.486736 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.517471 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.525826 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.956502 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.956770 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.956780 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:17 crc kubenswrapper[4909]: I1128 17:51:17.956790 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:18 crc kubenswrapper[4909]: I1128 17:51:18.165994 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused"
Nov 28 17:51:19 crc kubenswrapper[4909]: I1128 17:51:19.327351 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused"
Nov 28 17:51:19 crc kubenswrapper[4909]: I1128 17:51:19.821615 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:19 crc kubenswrapper[4909]: I1128 17:51:19.899403 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 17:51:19 crc kubenswrapper[4909]: I1128 17:51:19.997185 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:19 crc kubenswrapper[4909]: I1128 17:51:19.997311 4909 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 17:51:20 crc kubenswrapper[4909]: I1128 17:51:20.002014 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 17:51:24 crc kubenswrapper[4909]: I1128 17:51:24.902142 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:51:24 crc kubenswrapper[4909]: E1128 17:51:24.903060 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:51:29 crc kubenswrapper[4909]: I1128 17:51:29.927894 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:51:31 crc kubenswrapper[4909]: I1128 17:51:31.080410 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7f7584cb65-cc7wv"
Nov 28 17:51:31 crc kubenswrapper[4909]: I1128 17:51:31.594758 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:51:32 crc kubenswrapper[4909]: I1128 17:51:32.975101 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7f7584cb65-cc7wv"
Nov 28 17:51:33 crc kubenswrapper[4909]: I1128 17:51:33.045369 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"]
Nov 28 17:51:33 crc kubenswrapper[4909]: I1128 17:51:33.045586 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon-log" containerID="cri-o://27f064985149f47e89c604e0f17177632125c0fb8e4f5a17e1c819c685006698" gracePeriod=30
Nov 28 17:51:33 crc kubenswrapper[4909]: I1128 17:51:33.045740 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" containerID="cri-o://ce849d0ea273f7addfc359c90cfb11d9de9743cf0e25c7272db7a5c88a93eff3" gracePeriod=30
Nov 28 17:51:33 crc kubenswrapper[4909]: I1128 17:51:33.947579 4909 scope.go:117] "RemoveContainer" containerID="5d546ecbf4e9f4c6725f9278cd75bda9b2bf6f3321d224afd07055c3c9076f2f"
Nov 28 17:51:33 crc kubenswrapper[4909]: I1128 17:51:33.991264 4909 scope.go:117] "RemoveContainer" containerID="f191de5981361672a5e668ad37f6f1ab7da4d3987321bc62a1cdfb4529df68ce"
Nov 28 17:51:34 crc kubenswrapper[4909]: I1128 17:51:34.028416 4909 scope.go:117] "RemoveContainer" containerID="dde5f87023b06fc0c065d4ecdc2440ef61ee2578afc1e009d913580c4fba117e"
Nov 28 17:51:34 crc kubenswrapper[4909]: I1128 17:51:34.096901 4909 scope.go:117] "RemoveContainer" containerID="1e8f253edf2feb8a9ef876c0d68b5f2e2e9898587f99da9553788231dc4d87a9"
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.168219 4909 generic.go:334] "Generic (PLEG): container finished" podID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerID="743bb0300263e9d29f266a3237e27da8aced6a0274a448c4715d0a1b07cb2ab3" exitCode=137
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.169471 4909 generic.go:334] "Generic (PLEG): container finished" podID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerID="2c44ca4620701caa981707c19b746fec250e8822a377b1e08e67cb2d4487b9f0" exitCode=137
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.168289 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerDied","Data":"743bb0300263e9d29f266a3237e27da8aced6a0274a448c4715d0a1b07cb2ab3"}
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.169629 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerDied","Data":"2c44ca4620701caa981707c19b746fec250e8822a377b1e08e67cb2d4487b9f0"}
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.169643 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59965f94f7-jr6d6" event={"ID":"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3","Type":"ContainerDied","Data":"13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70"}
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.169668 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13a5a0f3f761396806381a331784df5ca83456a4ae6f4b673e422911270f5b70"
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.172332 4909 generic.go:334] "Generic (PLEG): container finished" podID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerID="ce849d0ea273f7addfc359c90cfb11d9de9743cf0e25c7272db7a5c88a93eff3" exitCode=0
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.172404 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerDied","Data":"ce849d0ea273f7addfc359c90cfb11d9de9743cf0e25c7272db7a5c88a93eff3"}
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.236478 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59965f94f7-jr6d6"
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.338371 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data\") pod \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") "
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.338418 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts\") pod \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") "
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.338513 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key\") pod \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") "
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.338579 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2k9j\" (UniqueName: \"kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j\") pod \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") "
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.338608 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs\") pod \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\" (UID: \"2714cbdd-52c8-4173-8ba6-f3fb9ea361a3\") "
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.339422 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs" (OuterVolumeSpecName: "logs") pod "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" (UID: "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.344549 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j" (OuterVolumeSpecName: "kube-api-access-g2k9j") pod "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" (UID: "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3"). InnerVolumeSpecName "kube-api-access-g2k9j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.344899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" (UID: "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.364044 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data" (OuterVolumeSpecName: "config-data") pod "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" (UID: "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.386012 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts" (OuterVolumeSpecName: "scripts") pod "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" (UID: "2714cbdd-52c8-4173-8ba6-f3fb9ea361a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.441869 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.441897 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.441908 4909 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.441921 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2k9j\" (UniqueName: \"kubernetes.io/projected/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-kube-api-access-g2k9j\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.441931 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3-logs\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:37 crc kubenswrapper[4909]: I1128 17:51:37.935780 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:51:37 crc kubenswrapper[4909]: E1128 17:51:37.936279 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:51:38 crc kubenswrapper[4909]: I1128 17:51:38.164588 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused"
Nov 28 17:51:38 crc kubenswrapper[4909]: I1128 17:51:38.185758 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59965f94f7-jr6d6"
Nov 28 17:51:38 crc kubenswrapper[4909]: I1128 17:51:38.222707 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"]
Nov 28 17:51:38 crc kubenswrapper[4909]: I1128 17:51:38.234684 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-59965f94f7-jr6d6"]
Nov 28 17:51:39 crc kubenswrapper[4909]: I1128 17:51:39.915642 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" path="/var/lib/kubelet/pods/2714cbdd-52c8-4173-8ba6-f3fb9ea361a3/volumes"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.144617 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-65fb6b7dff-zjwdh"]
Nov 28 17:51:40 crc kubenswrapper[4909]: E1128 17:51:40.145136 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon-log"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.145159 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon-log"
Nov 28 17:51:40 crc kubenswrapper[4909]: E1128 17:51:40.145178 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.145186 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.145441 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon-log"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.145465 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2714cbdd-52c8-4173-8ba6-f3fb9ea361a3" containerName="horizon"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.146769 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.186918 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65fb6b7dff-zjwdh"]
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.306698 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-horizon-secret-key\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.306768 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xrl5\" (UniqueName: \"kubernetes.io/projected/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-kube-api-access-9xrl5\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.306861 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-config-data\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.306935 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-logs\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.307037 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-scripts\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.409075 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-config-data\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.409129 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-logs\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.409211 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-scripts\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.409335 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-horizon-secret-key\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.409593 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xrl5\" (UniqueName: \"kubernetes.io/projected/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-kube-api-access-9xrl5\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.410264 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-logs\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.411199 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-scripts\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.411981 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-config-data\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.418208 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-horizon-secret-key\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.440283 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xrl5\" (UniqueName: \"kubernetes.io/projected/cf2fee0c-2628-4b4c-bd64-fe39147a51d1-kube-api-access-9xrl5\") pod \"horizon-65fb6b7dff-zjwdh\" (UID: \"cf2fee0c-2628-4b4c-bd64-fe39147a51d1\") " pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.478698 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:40 crc kubenswrapper[4909]: I1128 17:51:40.978087 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65fb6b7dff-zjwdh"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.080537 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-gvx6t"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.102931 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-gvx6t"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.126318 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-5a74-account-create-update-69kzf"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.144765 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-5a74-account-create-update-69kzf"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.229502 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65fb6b7dff-zjwdh" event={"ID":"cf2fee0c-2628-4b4c-bd64-fe39147a51d1","Type":"ContainerStarted","Data":"b64c68d4589091a9d36a11e99854816ecb8633c37c8c67297d54c008b2a56a8d"}
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.445664 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-gp9f5"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.446928 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.482542 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-gp9f5"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.542424 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-9063-account-create-update-j27rx"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.543771 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.548847 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.554534 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-9063-account-create-update-j27rx"]
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.641454 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.641505 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gx7b\" (UniqueName: \"kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.641584 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxxvq\" (UniqueName: \"kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.641684 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.743150 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.743341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.743373 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gx7b\" (UniqueName: \"kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.743412 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxxvq\" (UniqueName: \"kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.744050 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.744545 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.762853 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gx7b\" (UniqueName: \"kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b\") pod \"heat-db-create-gp9f5\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") " pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.766013 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxxvq\" (UniqueName: \"kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq\") pod \"heat-9063-account-create-update-j27rx\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") " pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.797820 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.864826 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.925414 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41" path="/var/lib/kubelet/pods/3ccc3c3c-a2ec-455d-b0a2-72ed6b32bf41/volumes"
Nov 28 17:51:41 crc kubenswrapper[4909]: I1128 17:51:41.926490 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4478262b-7ded-47d1-91ac-4957886c7628" path="/var/lib/kubelet/pods/4478262b-7ded-47d1-91ac-4957886c7628/volumes"
Nov 28 17:51:42 crc kubenswrapper[4909]: I1128 17:51:42.256556 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65fb6b7dff-zjwdh" event={"ID":"cf2fee0c-2628-4b4c-bd64-fe39147a51d1","Type":"ContainerStarted","Data":"519d7069b7beda9bd61b5905c0092a27e48fa14ef3b80b3c0d4b5105b1e8d504"}
Nov 28 17:51:42 crc kubenswrapper[4909]: I1128 17:51:42.256939 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65fb6b7dff-zjwdh" event={"ID":"cf2fee0c-2628-4b4c-bd64-fe39147a51d1","Type":"ContainerStarted","Data":"98d33a1a36eef9ce589be30552b52a7d690428223f98fdc769e40bd3225071f0"}
Nov 28 17:51:42 crc kubenswrapper[4909]: I1128 17:51:42.299120 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-65fb6b7dff-zjwdh" podStartSLOduration=2.299101319 podStartE2EDuration="2.299101319s" podCreationTimestamp="2025-11-28 17:51:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:51:42.286969857 +0000 UTC m=+6084.683654391" watchObservedRunningTime="2025-11-28 17:51:42.299101319 +0000 UTC m=+6084.695785843"
Nov 28 17:51:42 crc kubenswrapper[4909]: I1128 17:51:42.322571 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-gp9f5"]
Nov 28 17:51:42 crc kubenswrapper[4909]: W1128 17:51:42.322814 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a2be1c1_d2d5_4683_b57f_6be731743630.slice/crio-490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b WatchSource:0}: Error finding container 490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b: Status 404 returned error can't find the container with id 490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b
Nov 28 17:51:42 crc kubenswrapper[4909]: I1128 17:51:42.394889 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-9063-account-create-update-j27rx"]
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.268424 4909 generic.go:334] "Generic (PLEG): container finished" podID="0d62d869-b3e1-417f-b946-587d26d6549c" containerID="bb33086f8dbabc2ba18b8f027a4f4587a7ad592c030b15a0c74dd86c6ea691c6" exitCode=0
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.268486 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-9063-account-create-update-j27rx" event={"ID":"0d62d869-b3e1-417f-b946-587d26d6549c","Type":"ContainerDied","Data":"bb33086f8dbabc2ba18b8f027a4f4587a7ad592c030b15a0c74dd86c6ea691c6"}
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.268936 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-9063-account-create-update-j27rx" event={"ID":"0d62d869-b3e1-417f-b946-587d26d6549c","Type":"ContainerStarted","Data":"3bd472c7083afa5b5f947f618f25642bf21b6e960315ba347905148f5e9afdf4"}
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.273434 4909 generic.go:334] "Generic (PLEG): container finished" podID="0a2be1c1-d2d5-4683-b57f-6be731743630" containerID="2b6990b20d2a79caf67eb63869a8d96a3328d56573f8b5d96523553ad7d6ddf0" exitCode=0
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.273540 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-gp9f5" event={"ID":"0a2be1c1-d2d5-4683-b57f-6be731743630","Type":"ContainerDied","Data":"2b6990b20d2a79caf67eb63869a8d96a3328d56573f8b5d96523553ad7d6ddf0"}
Nov 28 17:51:43 crc kubenswrapper[4909]: I1128 17:51:43.273588 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-gp9f5" event={"ID":"0a2be1c1-d2d5-4683-b57f-6be731743630","Type":"ContainerStarted","Data":"490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b"}
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.810059 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.817904 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903143 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxxvq\" (UniqueName: \"kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq\") pod \"0d62d869-b3e1-417f-b946-587d26d6549c\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") "
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903179 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts\") pod \"0a2be1c1-d2d5-4683-b57f-6be731743630\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") "
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903417 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gx7b\" (UniqueName: \"kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b\") pod \"0a2be1c1-d2d5-4683-b57f-6be731743630\" (UID: \"0a2be1c1-d2d5-4683-b57f-6be731743630\") "
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903498 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts\") pod \"0d62d869-b3e1-417f-b946-587d26d6549c\" (UID: \"0d62d869-b3e1-417f-b946-587d26d6549c\") "
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903717 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a2be1c1-d2d5-4683-b57f-6be731743630" (UID: "0a2be1c1-d2d5-4683-b57f-6be731743630"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.903934 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d62d869-b3e1-417f-b946-587d26d6549c" (UID: "0d62d869-b3e1-417f-b946-587d26d6549c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.904120 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d62d869-b3e1-417f-b946-587d26d6549c-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.904177 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a2be1c1-d2d5-4683-b57f-6be731743630-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.908952 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq" (OuterVolumeSpecName: "kube-api-access-pxxvq") pod "0d62d869-b3e1-417f-b946-587d26d6549c" (UID: "0d62d869-b3e1-417f-b946-587d26d6549c"). InnerVolumeSpecName "kube-api-access-pxxvq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:51:44 crc kubenswrapper[4909]: I1128 17:51:44.909187 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b" (OuterVolumeSpecName: "kube-api-access-4gx7b") pod "0a2be1c1-d2d5-4683-b57f-6be731743630" (UID: "0a2be1c1-d2d5-4683-b57f-6be731743630"). InnerVolumeSpecName "kube-api-access-4gx7b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.006817 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxxvq\" (UniqueName: \"kubernetes.io/projected/0d62d869-b3e1-417f-b946-587d26d6549c-kube-api-access-pxxvq\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.006862 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gx7b\" (UniqueName: \"kubernetes.io/projected/0a2be1c1-d2d5-4683-b57f-6be731743630-kube-api-access-4gx7b\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.325111 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-9063-account-create-update-j27rx"
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.325109 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-9063-account-create-update-j27rx" event={"ID":"0d62d869-b3e1-417f-b946-587d26d6549c","Type":"ContainerDied","Data":"3bd472c7083afa5b5f947f618f25642bf21b6e960315ba347905148f5e9afdf4"}
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.325927 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bd472c7083afa5b5f947f618f25642bf21b6e960315ba347905148f5e9afdf4"
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.329292 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-gp9f5" event={"ID":"0a2be1c1-d2d5-4683-b57f-6be731743630","Type":"ContainerDied","Data":"490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b"}
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.329333 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="490717c0f00d6f00ea3fd90db7b91ddd71fe38e7ea090905ae6de668bce27a6b"
Nov 28 17:51:45 crc kubenswrapper[4909]: I1128 17:51:45.329424 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-gp9f5"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.722217 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-rgqnh"]
Nov 28 17:51:46 crc kubenswrapper[4909]: E1128 17:51:46.723042 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d62d869-b3e1-417f-b946-587d26d6549c" containerName="mariadb-account-create-update"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.723059 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d62d869-b3e1-417f-b946-587d26d6549c" containerName="mariadb-account-create-update"
Nov 28 17:51:46 crc kubenswrapper[4909]: E1128 17:51:46.723071 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2be1c1-d2d5-4683-b57f-6be731743630" containerName="mariadb-database-create"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.723077 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2be1c1-d2d5-4683-b57f-6be731743630" containerName="mariadb-database-create"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.723308 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d62d869-b3e1-417f-b946-587d26d6549c" containerName="mariadb-account-create-update"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.723318 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2be1c1-d2d5-4683-b57f-6be731743630" containerName="mariadb-database-create"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.724010 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.726051 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.727330 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-xhqw6"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.732246 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-rgqnh"]
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.853844 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.853989 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.854059 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnqt5\" (UniqueName: \"kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.955982 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.956058 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnqt5\" (UniqueName: \"kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.956163 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.964789 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.965867 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:46 crc kubenswrapper[4909]: I1128 17:51:46.974808 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnqt5\" (UniqueName: \"kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5\") pod \"heat-db-sync-rgqnh\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") " pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.048679 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-6pvd8"]
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.059636 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.062707 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-6pvd8"]
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.569294 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-rgqnh"]
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.575488 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 17:51:47 crc kubenswrapper[4909]: I1128 17:51:47.918673 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65413c3a-c6e0-4f61-89bd-52005f25872e" path="/var/lib/kubelet/pods/65413c3a-c6e0-4f61-89bd-52005f25872e/volumes"
Nov 28 17:51:48 crc kubenswrapper[4909]: I1128 17:51:48.165170 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused"
Nov 28 17:51:48 crc kubenswrapper[4909]: I1128 17:51:48.395290 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-rgqnh" event={"ID":"8051d548-77d4-4c16-be81-d2f97bacca54","Type":"ContainerStarted","Data":"ed39c4b950fbace093bebfe70b7e829340ee0710c76deb6f8c844fe544290ab8"}
Nov 28 17:51:50 crc kubenswrapper[4909]: I1128 17:51:50.479411 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:50 crc kubenswrapper[4909]: I1128 17:51:50.485902 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-65fb6b7dff-zjwdh"
Nov 28 17:51:52 crc kubenswrapper[4909]: I1128 17:51:52.902021 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:51:52 crc kubenswrapper[4909]: E1128 17:51:52.903032 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:51:55 crc kubenswrapper[4909]: I1128 17:51:55.475927 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-rgqnh" event={"ID":"8051d548-77d4-4c16-be81-d2f97bacca54","Type":"ContainerStarted","Data":"3348844ce4b8fdf98c144937128d422c6411096d47ad8cc31301b703f9a5c148"}
Nov 28 17:51:55 crc kubenswrapper[4909]: I1128 17:51:55.495528 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-rgqnh" podStartSLOduration=2.102326702 podStartE2EDuration="9.495509048s" podCreationTimestamp="2025-11-28 17:51:46 +0000 UTC" firstStartedPulling="2025-11-28 17:51:47.5753081 +0000 UTC m=+6089.971992624" lastFinishedPulling="2025-11-28 17:51:54.968490416 +0000 UTC m=+6097.365174970" observedRunningTime="2025-11-28 17:51:55.488323937 +0000 UTC m=+6097.885008461" watchObservedRunningTime="2025-11-28 17:51:55.495509048 +0000 UTC m=+6097.892193572"
Nov 28 17:51:57 crc kubenswrapper[4909]: I1128 17:51:57.495819 4909 generic.go:334] "Generic (PLEG): container finished" podID="8051d548-77d4-4c16-be81-d2f97bacca54" containerID="3348844ce4b8fdf98c144937128d422c6411096d47ad8cc31301b703f9a5c148" exitCode=0
Nov 28 17:51:57 crc kubenswrapper[4909]: I1128 17:51:57.496604 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-rgqnh" event={"ID":"8051d548-77d4-4c16-be81-d2f97bacca54","Type":"ContainerDied","Data":"3348844ce4b8fdf98c144937128d422c6411096d47ad8cc31301b703f9a5c148"}
Nov 28 17:51:58 crc kubenswrapper[4909]: I1128 17:51:58.164573 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5d66dd75d5-cvdgt" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused"
Nov 28 17:51:58 crc kubenswrapper[4909]: I1128 17:51:58.164743 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5d66dd75d5-cvdgt"
Nov 28 17:51:58 crc kubenswrapper[4909]: I1128 17:51:58.952251 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.051159 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle\") pod \"8051d548-77d4-4c16-be81-d2f97bacca54\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") "
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.051256 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data\") pod \"8051d548-77d4-4c16-be81-d2f97bacca54\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") "
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.051333 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnqt5\" (UniqueName: \"kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5\") pod \"8051d548-77d4-4c16-be81-d2f97bacca54\" (UID: \"8051d548-77d4-4c16-be81-d2f97bacca54\") "
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.057982 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5" (OuterVolumeSpecName: "kube-api-access-nnqt5") pod "8051d548-77d4-4c16-be81-d2f97bacca54" (UID: "8051d548-77d4-4c16-be81-d2f97bacca54"). InnerVolumeSpecName "kube-api-access-nnqt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.087154 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8051d548-77d4-4c16-be81-d2f97bacca54" (UID: "8051d548-77d4-4c16-be81-d2f97bacca54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.153221 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.153560 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnqt5\" (UniqueName: \"kubernetes.io/projected/8051d548-77d4-4c16-be81-d2f97bacca54-kube-api-access-nnqt5\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.169240 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data" (OuterVolumeSpecName: "config-data") pod "8051d548-77d4-4c16-be81-d2f97bacca54" (UID: "8051d548-77d4-4c16-be81-d2f97bacca54"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.258530 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8051d548-77d4-4c16-be81-d2f97bacca54-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.521399 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-rgqnh" event={"ID":"8051d548-77d4-4c16-be81-d2f97bacca54","Type":"ContainerDied","Data":"ed39c4b950fbace093bebfe70b7e829340ee0710c76deb6f8c844fe544290ab8"}
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.521763 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed39c4b950fbace093bebfe70b7e829340ee0710c76deb6f8c844fe544290ab8"
Nov 28 17:51:59 crc kubenswrapper[4909]: I1128 17:51:59.521583 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-rgqnh"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.671362 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-745d4ddf6b-r7v9k"]
Nov 28 17:52:00 crc kubenswrapper[4909]: E1128 17:52:00.671835 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8051d548-77d4-4c16-be81-d2f97bacca54" containerName="heat-db-sync"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.671847 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8051d548-77d4-4c16-be81-d2f97bacca54" containerName="heat-db-sync"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.672058 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8051d548-77d4-4c16-be81-d2f97bacca54" containerName="heat-db-sync"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.672772 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.679863 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.680105 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-xhqw6"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.680921 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.681923 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhblt\" (UniqueName: \"kubernetes.io/projected/e547b3a5-efff-473b-842f-5c4a1334bc46-kube-api-access-dhblt\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.681995 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-combined-ca-bundle\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.682151 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data-custom\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.682199 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.698727 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-745d4ddf6b-r7v9k"]
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.757308 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-86554d647c-254tn"]
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.758745 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-86554d647c-254tn"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.761272 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.771309 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86554d647c-254tn"]
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783129 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data-custom\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783180 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svfcp\" (UniqueName: \"kubernetes.io/projected/55707b9e-aa43-4152-999d-6ac4a4ac49d2-kube-api-access-svfcp\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783211 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data-custom\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783243 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783270 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-combined-ca-bundle\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783313 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhblt\" (UniqueName: \"kubernetes.io/projected/e547b3a5-efff-473b-842f-5c4a1334bc46-kube-api-access-dhblt\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783342 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn"
Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.783364 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-combined-ca-bundle\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: 
\"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.792358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-combined-ca-bundle\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.805158 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.810317 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e547b3a5-efff-473b-842f-5c4a1334bc46-config-data-custom\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.840536 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhblt\" (UniqueName: \"kubernetes.io/projected/e547b3a5-efff-473b-842f-5c4a1334bc46-kube-api-access-dhblt\") pod \"heat-engine-745d4ddf6b-r7v9k\" (UID: \"e547b3a5-efff-473b-842f-5c4a1334bc46\") " pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.861723 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7fb999b7d7-5c5hr"] Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.863136 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.878714 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.881912 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7fb999b7d7-5c5hr"] Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887041 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data-custom\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887097 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvbd8\" (UniqueName: \"kubernetes.io/projected/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-kube-api-access-nvbd8\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887136 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svfcp\" (UniqueName: \"kubernetes.io/projected/55707b9e-aa43-4152-999d-6ac4a4ac49d2-kube-api-access-svfcp\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887195 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887222 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-combined-ca-bundle\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887268 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-combined-ca-bundle\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887299 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.887339 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data-custom\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " 
pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.927500 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-combined-ca-bundle\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.928133 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data-custom\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.929014 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55707b9e-aa43-4152-999d-6ac4a4ac49d2-config-data\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.949888 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svfcp\" (UniqueName: \"kubernetes.io/projected/55707b9e-aa43-4152-999d-6ac4a4ac49d2-kube-api-access-svfcp\") pod \"heat-cfnapi-86554d647c-254tn\" (UID: \"55707b9e-aa43-4152-999d-6ac4a4ac49d2\") " pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:00 crc kubenswrapper[4909]: I1128 17:52:00.993475 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.001915 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvbd8\" (UniqueName: \"kubernetes.io/projected/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-kube-api-access-nvbd8\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.002117 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.002296 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-combined-ca-bundle\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.002696 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data-custom\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.039993 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvbd8\" (UniqueName: 
\"kubernetes.io/projected/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-kube-api-access-nvbd8\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.042672 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data-custom\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.043320 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-combined-ca-bundle\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.065066 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf8fec5-911d-4e34-8cd6-dd4d7e279741-config-data\") pod \"heat-api-7fb999b7d7-5c5hr\" (UID: \"dbf8fec5-911d-4e34-8cd6-dd4d7e279741\") " pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.074930 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.197622 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.655623 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-745d4ddf6b-r7v9k"] Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.740289 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86554d647c-254tn"] Nov 28 17:52:01 crc kubenswrapper[4909]: W1128 17:52:01.743248 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55707b9e_aa43_4152_999d_6ac4a4ac49d2.slice/crio-643602d3629241c6cd5edd84117969622a35ffb6c6bd716b5a33cd46f2cb73a8 WatchSource:0}: Error finding container 643602d3629241c6cd5edd84117969622a35ffb6c6bd716b5a33cd46f2cb73a8: Status 404 returned error can't find the container with id 643602d3629241c6cd5edd84117969622a35ffb6c6bd716b5a33cd46f2cb73a8 Nov 28 17:52:01 crc kubenswrapper[4909]: I1128 17:52:01.914297 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7fb999b7d7-5c5hr"] Nov 28 17:52:01 crc kubenswrapper[4909]: W1128 17:52:01.916545 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbf8fec5_911d_4e34_8cd6_dd4d7e279741.slice/crio-d912e14bd35201893d44c25a4033861a044e2d487df5ae066f68ed3766ca7be6 WatchSource:0}: Error finding container d912e14bd35201893d44c25a4033861a044e2d487df5ae066f68ed3766ca7be6: Status 404 returned error can't find the container with id d912e14bd35201893d44c25a4033861a044e2d487df5ae066f68ed3766ca7be6 Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.557748 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86554d647c-254tn" 
event={"ID":"55707b9e-aa43-4152-999d-6ac4a4ac49d2","Type":"ContainerStarted","Data":"643602d3629241c6cd5edd84117969622a35ffb6c6bd716b5a33cd46f2cb73a8"} Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.559275 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7fb999b7d7-5c5hr" event={"ID":"dbf8fec5-911d-4e34-8cd6-dd4d7e279741","Type":"ContainerStarted","Data":"d912e14bd35201893d44c25a4033861a044e2d487df5ae066f68ed3766ca7be6"} Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.561286 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-745d4ddf6b-r7v9k" event={"ID":"e547b3a5-efff-473b-842f-5c4a1334bc46","Type":"ContainerStarted","Data":"1e257bd16d4dc8a832a06f1cc670014d8d9cf6f2bf6c3a266bb0f9f78e369393"} Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.561318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-745d4ddf6b-r7v9k" event={"ID":"e547b3a5-efff-473b-842f-5c4a1334bc46","Type":"ContainerStarted","Data":"c7d5f8bd9adc8ce181ee4833991717678ccf2ecd92b717ae205d52d113e46283"} Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.561834 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.586155 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-745d4ddf6b-r7v9k" podStartSLOduration=2.58613017 podStartE2EDuration="2.58613017s" podCreationTimestamp="2025-11-28 17:52:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:52:02.58010966 +0000 UTC m=+6104.976794184" watchObservedRunningTime="2025-11-28 17:52:02.58613017 +0000 UTC m=+6104.982814714" Nov 28 17:52:02 crc kubenswrapper[4909]: I1128 17:52:02.919122 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-65fb6b7dff-zjwdh" Nov 28 17:52:03 crc kubenswrapper[4909]: I1128 17:52:03.588348 4909 generic.go:334] "Generic (PLEG): container finished" podID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerID="27f064985149f47e89c604e0f17177632125c0fb8e4f5a17e1c819c685006698" exitCode=137 Nov 28 17:52:03 crc kubenswrapper[4909]: I1128 17:52:03.589025 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerDied","Data":"27f064985149f47e89c604e0f17177632125c0fb8e4f5a17e1c819c685006698"} Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.111308 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.279524 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key\") pod \"08c9f91c-f9ee-4be9-b5c1-993d20615405\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.279616 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c9f91c-f9ee-4be9-b5c1-993d20615405-logs\") pod \"08c9f91c-f9ee-4be9-b5c1-993d20615405\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.279669 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x55xl\" (UniqueName: \"kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl\") pod \"08c9f91c-f9ee-4be9-b5c1-993d20615405\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.279723 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts\") pod \"08c9f91c-f9ee-4be9-b5c1-993d20615405\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.279782 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data\") pod \"08c9f91c-f9ee-4be9-b5c1-993d20615405\" (UID: \"08c9f91c-f9ee-4be9-b5c1-993d20615405\") " Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.281591 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08c9f91c-f9ee-4be9-b5c1-993d20615405-logs" (OuterVolumeSpecName: "logs") pod "08c9f91c-f9ee-4be9-b5c1-993d20615405" (UID: "08c9f91c-f9ee-4be9-b5c1-993d20615405"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.303403 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl" (OuterVolumeSpecName: "kube-api-access-x55xl") pod "08c9f91c-f9ee-4be9-b5c1-993d20615405" (UID: "08c9f91c-f9ee-4be9-b5c1-993d20615405"). InnerVolumeSpecName "kube-api-access-x55xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.308388 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "08c9f91c-f9ee-4be9-b5c1-993d20615405" (UID: "08c9f91c-f9ee-4be9-b5c1-993d20615405"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.327182 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts" (OuterVolumeSpecName: "scripts") pod "08c9f91c-f9ee-4be9-b5c1-993d20615405" (UID: "08c9f91c-f9ee-4be9-b5c1-993d20615405"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.329256 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data" (OuterVolumeSpecName: "config-data") pod "08c9f91c-f9ee-4be9-b5c1-993d20615405" (UID: "08c9f91c-f9ee-4be9-b5c1-993d20615405"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.383746 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.383856 4909 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/08c9f91c-f9ee-4be9-b5c1-993d20615405-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.383868 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c9f91c-f9ee-4be9-b5c1-993d20615405-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.383886 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x55xl\" (UniqueName: \"kubernetes.io/projected/08c9f91c-f9ee-4be9-b5c1-993d20615405-kube-api-access-x55xl\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.383897 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08c9f91c-f9ee-4be9-b5c1-993d20615405-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.600669 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d66dd75d5-cvdgt" event={"ID":"08c9f91c-f9ee-4be9-b5c1-993d20615405","Type":"ContainerDied","Data":"1fb84037ddafa1357af824415598953d9f48ae8ba7cfcbd1fdce9fb8b058db30"} Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.600715 4909 scope.go:117] "RemoveContainer" containerID="ce849d0ea273f7addfc359c90cfb11d9de9743cf0e25c7272db7a5c88a93eff3" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.600826 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d66dd75d5-cvdgt" Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.645463 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"] Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.656725 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5d66dd75d5-cvdgt"] Nov 28 17:52:04 crc kubenswrapper[4909]: I1128 17:52:04.988515 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-65fb6b7dff-zjwdh" Nov 28 17:52:05 crc kubenswrapper[4909]: I1128 17:52:05.071170 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"] Nov 28 17:52:05 crc kubenswrapper[4909]: I1128 17:52:05.071388 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon-log" containerID="cri-o://84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941" gracePeriod=30 Nov 28 17:52:05 crc kubenswrapper[4909]: I1128 17:52:05.071820 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon" containerID="cri-o://5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665" gracePeriod=30 Nov 28 17:52:05 crc kubenswrapper[4909]: I1128 17:52:05.846982 4909 scope.go:117] "RemoveContainer" containerID="27f064985149f47e89c604e0f17177632125c0fb8e4f5a17e1c819c685006698" Nov 28 17:52:05 crc kubenswrapper[4909]: I1128 17:52:05.921631 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" path="/var/lib/kubelet/pods/08c9f91c-f9ee-4be9-b5c1-993d20615405/volumes" Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.626364 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86554d647c-254tn" event={"ID":"55707b9e-aa43-4152-999d-6ac4a4ac49d2","Type":"ContainerStarted","Data":"9fc5ba2db8f0a46c22faafb44f89a96aac2166df1744bccc0c62c955c8bc3759"} Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.626746 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.633552 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7fb999b7d7-5c5hr" event={"ID":"dbf8fec5-911d-4e34-8cd6-dd4d7e279741","Type":"ContainerStarted","Data":"509d69f0f1354556ec5e019b916160b1fa0bb0459526309c70461ef36a845eff"} Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.633698 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.646924 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-86554d647c-254tn" podStartSLOduration=2.545753817 podStartE2EDuration="6.646908416s" podCreationTimestamp="2025-11-28 17:52:00 +0000 UTC" firstStartedPulling="2025-11-28 17:52:01.747805251 +0000 UTC m=+6104.144489765" lastFinishedPulling="2025-11-28 17:52:05.84895984 +0000 UTC m=+6108.245644364" observedRunningTime="2025-11-28 17:52:06.642767105 +0000 UTC m=+6109.039451649" watchObservedRunningTime="2025-11-28 17:52:06.646908416 +0000 UTC m=+6109.043592940" Nov 28 17:52:06 crc kubenswrapper[4909]: I1128 17:52:06.662450 4909 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7fb999b7d7-5c5hr" podStartSLOduration=2.736252902 podStartE2EDuration="6.662426558s" podCreationTimestamp="2025-11-28 17:52:00 +0000 UTC" firstStartedPulling="2025-11-28 17:52:01.923368779 +0000 UTC m=+6104.320053303" lastFinishedPulling="2025-11-28 17:52:05.849542445 +0000 UTC m=+6108.246226959" observedRunningTime="2025-11-28 17:52:06.656305145 +0000 UTC m=+6109.052989669" watchObservedRunningTime="2025-11-28 17:52:06.662426558 +0000 UTC m=+6109.059111082" Nov 28 17:52:07 crc kubenswrapper[4909]: I1128 17:52:07.914816 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:52:07 crc kubenswrapper[4909]: E1128 17:52:07.915384 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:52:08 crc kubenswrapper[4909]: I1128 17:52:08.655801 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerID="5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665" exitCode=0 Nov 28 17:52:08 crc kubenswrapper[4909]: I1128 17:52:08.655877 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerDied","Data":"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"} Nov 28 17:52:09 crc kubenswrapper[4909]: I1128 17:52:09.325728 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.028380 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-mqt5d"] Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.039292 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-0ec6-account-create-update-fz282"] Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.050852 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-mqt5d"] Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.061854 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-0ec6-account-create-update-fz282"] Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.490173 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-86554d647c-254tn" Nov 28 17:52:12 crc kubenswrapper[4909]: I1128 17:52:12.532126 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7fb999b7d7-5c5hr" Nov 28 17:52:13 crc kubenswrapper[4909]: I1128 17:52:13.915239 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1805556c-845c-4ac7-aa36-4a32906b45fe" path="/var/lib/kubelet/pods/1805556c-845c-4ac7-aa36-4a32906b45fe/volumes" Nov 28 17:52:13 crc kubenswrapper[4909]: I1128 17:52:13.916563 4909 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="e080779e-a48f-4e59-9c88-8d458d3791df" path="/var/lib/kubelet/pods/e080779e-a48f-4e59-9c88-8d458d3791df/volumes" Nov 28 17:52:19 crc kubenswrapper[4909]: I1128 17:52:19.326588 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:52:21 crc kubenswrapper[4909]: I1128 17:52:21.035258 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-745d4ddf6b-r7v9k" Nov 28 17:52:21 crc kubenswrapper[4909]: I1128 17:52:21.064018 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-8jlbg"] Nov 28 17:52:21 crc kubenswrapper[4909]: I1128 17:52:21.096060 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-8jlbg"] Nov 28 17:52:21 crc kubenswrapper[4909]: I1128 17:52:21.907971 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:52:21 crc kubenswrapper[4909]: E1128 17:52:21.908615 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:52:21 crc kubenswrapper[4909]: I1128 17:52:21.916259 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00ebab16-ee08-4375-877d-12fff71e44be" path="/var/lib/kubelet/pods/00ebab16-ee08-4375-877d-12fff71e44be/volumes" Nov 28 17:52:29 crc kubenswrapper[4909]: I1128 17:52:29.326559 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f7584cb65-cc7wv" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:52:29 crc kubenswrapper[4909]: I1128 17:52:29.327233 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.528890 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz"] Nov 28 17:52:33 crc kubenswrapper[4909]: E1128 17:52:33.529818 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.529834 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" Nov 28 17:52:33 crc kubenswrapper[4909]: E1128 17:52:33.529848 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon-log" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.529857 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon-log" Nov 28 17:52:33 crc kubenswrapper[4909]: 
I1128 17:52:33.530146 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.530164 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c9f91c-f9ee-4be9-b5c1-993d20615405" containerName="horizon-log" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.532018 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.535423 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.550491 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz"] Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.634211 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.634297 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.634651 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jvmw\" (UniqueName: \"kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.737952 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.738091 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.738564 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.738748 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.738305 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jvmw\" (UniqueName: \"kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.764146 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jvmw\" (UniqueName: \"kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:33 crc kubenswrapper[4909]: I1128 17:52:33.854170 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.211525 4909 scope.go:117] "RemoveContainer" containerID="fcc222908bacb3596ecca1a232cbd552340adfaabb38e259c211ecb0919d07b2" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.243288 4909 scope.go:117] "RemoveContainer" containerID="103ea4de5ee4e81fc593501bacd6154237ed3912f0c3c5e1d3301f7ad9bc415e" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.295852 4909 scope.go:117] "RemoveContainer" containerID="dc6675478ee391625360b48caf9c7bc81074f2db2c0d62631302a25656382e7d" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.354270 4909 scope.go:117] "RemoveContainer" containerID="49da55a0a93ceaa414483e1d4f76d6003a274d793c56881e4eb87322806c570b" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.378469 4909 scope.go:117] "RemoveContainer" containerID="3170aef21dd9696ed3a74b81f6690d79a1021b41218a4501a000fa87b8063bc3" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.409441 4909 scope.go:117] "RemoveContainer" containerID="92f1bb93adb3c8d54919e6b67a98fb85f8a3e20f9b9cd19cafff4ffe87a45585" Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.415133 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz"] Nov 28 17:52:34 crc kubenswrapper[4909]: W1128 17:52:34.422978 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb31728fc_2e37_4b7c_9dd4_3d41a22bc00d.slice/crio-537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0 WatchSource:0}: Error finding container 537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0: Status 404 returned error can't find the container with id 537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0 Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.956650 4909 generic.go:334] "Generic (PLEG): container finished" podID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerID="9c5f9b10239762cd6c6c46ceaf25dd4b3ec286f99b3233f7f7cbc9e2c1b89b62" exitCode=0 Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.956769 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerDied","Data":"9c5f9b10239762cd6c6c46ceaf25dd4b3ec286f99b3233f7f7cbc9e2c1b89b62"} Nov 28 17:52:34 crc kubenswrapper[4909]: I1128 17:52:34.957046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerStarted","Data":"537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0"} Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.495791 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f7584cb65-cc7wv" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.581925 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key\") pod \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.582101 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts\") pod \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.582295 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rspdm\" (UniqueName: \"kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm\") pod \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.582338 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data\") pod \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.582370 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs\") pod \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\" (UID: \"d9da4a21-03e8-4cd7-84b7-f64ee73f596b\") " Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.582892 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs" (OuterVolumeSpecName: "logs") pod "d9da4a21-03e8-4cd7-84b7-f64ee73f596b" (UID: "d9da4a21-03e8-4cd7-84b7-f64ee73f596b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.583342 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.587299 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d9da4a21-03e8-4cd7-84b7-f64ee73f596b" (UID: "d9da4a21-03e8-4cd7-84b7-f64ee73f596b"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.587345 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm" (OuterVolumeSpecName: "kube-api-access-rspdm") pod "d9da4a21-03e8-4cd7-84b7-f64ee73f596b" (UID: "d9da4a21-03e8-4cd7-84b7-f64ee73f596b"). InnerVolumeSpecName "kube-api-access-rspdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.606794 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts" (OuterVolumeSpecName: "scripts") pod "d9da4a21-03e8-4cd7-84b7-f64ee73f596b" (UID: "d9da4a21-03e8-4cd7-84b7-f64ee73f596b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.619893 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data" (OuterVolumeSpecName: "config-data") pod "d9da4a21-03e8-4cd7-84b7-f64ee73f596b" (UID: "d9da4a21-03e8-4cd7-84b7-f64ee73f596b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.685108 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rspdm\" (UniqueName: \"kubernetes.io/projected/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-kube-api-access-rspdm\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.685149 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.685159 4909 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.685169 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9da4a21-03e8-4cd7-84b7-f64ee73f596b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.983883 4909 generic.go:334] "Generic (PLEG): container finished" podID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerID="84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941" exitCode=137 Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.983939 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerDied","Data":"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"} Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.983983 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f7584cb65-cc7wv"
Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.984008 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f7584cb65-cc7wv" event={"ID":"d9da4a21-03e8-4cd7-84b7-f64ee73f596b","Type":"ContainerDied","Data":"b5b330c35dcfa83a50e1ef16e5a9a0dad3c2428b0f8bf73d4270ccdb5dc1cd0f"}
Nov 28 17:52:35 crc kubenswrapper[4909]: I1128 17:52:35.984027 4909 scope.go:117] "RemoveContainer" containerID="5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.032691 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"]
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.069862 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f7584cb65-cc7wv"]
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.180000 4909 scope.go:117] "RemoveContainer" containerID="84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.210381 4909 scope.go:117] "RemoveContainer" containerID="5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"
Nov 28 17:52:36 crc kubenswrapper[4909]: E1128 17:52:36.210966 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665\": container with ID starting with 5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665 not found: ID does not exist" containerID="5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.211006 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665"} err="failed to get container status \"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665\": rpc error: code = NotFound desc = could not find container \"5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665\": container with ID starting with 5ed3ae13293e114f4c328812afd45b85d9375588f7f0021eafc3831e71954665 not found: ID does not exist"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.211032 4909 scope.go:117] "RemoveContainer" containerID="84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"
Nov 28 17:52:36 crc kubenswrapper[4909]: E1128 17:52:36.211435 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941\": container with ID starting with 84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941 not found: ID does not exist" containerID="84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.211470 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941"} err="failed to get container status \"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941\": rpc error: code = NotFound desc = could not find container \"84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941\": container with ID starting with 84aa3792cf62ba6ee304a37b097d3e048e9b824739f1f4f67a8a5e4ee9b23941 not found: ID does not exist"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.902059 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:52:36 crc kubenswrapper[4909]: E1128 17:52:36.902854 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:52:36 crc kubenswrapper[4909]: I1128 17:52:36.998975 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerStarted","Data":"4cc75606623aa30e9256f3249af62fb67fa5cf3386bb41086dccdb1a4cb4c69c"}
Nov 28 17:52:37 crc kubenswrapper[4909]: I1128 17:52:37.921175 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" path="/var/lib/kubelet/pods/d9da4a21-03e8-4cd7-84b7-f64ee73f596b/volumes"
Nov 28 17:52:38 crc kubenswrapper[4909]: I1128 17:52:38.016168 4909 generic.go:334] "Generic (PLEG): container finished" podID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerID="4cc75606623aa30e9256f3249af62fb67fa5cf3386bb41086dccdb1a4cb4c69c" exitCode=0
Nov 28 17:52:38 crc kubenswrapper[4909]: I1128 17:52:38.016229 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerDied","Data":"4cc75606623aa30e9256f3249af62fb67fa5cf3386bb41086dccdb1a4cb4c69c"}
Nov 28 17:52:38 crc kubenswrapper[4909]: E1128 17:52:38.715335 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb31728fc_2e37_4b7c_9dd4_3d41a22bc00d.slice/crio-31c6af9febe6c1b1b40ea01a104bf57b29dafafd184b79bca4788e8b63a302b2.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 17:52:39 crc kubenswrapper[4909]: I1128 17:52:39.035994 4909 generic.go:334] "Generic (PLEG): container finished" podID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerID="31c6af9febe6c1b1b40ea01a104bf57b29dafafd184b79bca4788e8b63a302b2" exitCode=0
Nov 28 17:52:39 crc kubenswrapper[4909]: I1128 17:52:39.036474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerDied","Data":"31c6af9febe6c1b1b40ea01a104bf57b29dafafd184b79bca4788e8b63a302b2"}
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.437855 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz"
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.496272 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util\") pod \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") "
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.496546 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jvmw\" (UniqueName: \"kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw\") pod \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") "
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.496615 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle\") pod \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\" (UID: \"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d\") "
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.499752 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle" (OuterVolumeSpecName: "bundle") pod "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" (UID: "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.501740 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw" (OuterVolumeSpecName: "kube-api-access-9jvmw") pod "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" (UID: "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d"). InnerVolumeSpecName "kube-api-access-9jvmw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.507273 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util" (OuterVolumeSpecName: "util") pod "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" (UID: "b31728fc-2e37-4b7c-9dd4-3d41a22bc00d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.600117 4909 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-util\") on node \"crc\" DevicePath \"\""
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.600170 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jvmw\" (UniqueName: \"kubernetes.io/projected/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-kube-api-access-9jvmw\") on node \"crc\" DevicePath \"\""
Nov 28 17:52:40 crc kubenswrapper[4909]: I1128 17:52:40.600185 4909 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b31728fc-2e37-4b7c-9dd4-3d41a22bc00d-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:52:41 crc kubenswrapper[4909]: I1128 17:52:41.061196 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz" event={"ID":"b31728fc-2e37-4b7c-9dd4-3d41a22bc00d","Type":"ContainerDied","Data":"537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0"}
Nov 28 17:52:41 crc kubenswrapper[4909]: I1128 17:52:41.061770 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="537f60f28afbdde614c0721ee91500669a9916459f15d9abd2ccd16f63bfd5c0"
Nov 28 17:52:41 crc kubenswrapper[4909]: I1128 17:52:41.061347 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz"
Nov 28 17:52:50 crc kubenswrapper[4909]: I1128 17:52:50.902415 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:52:50 crc kubenswrapper[4909]: E1128 17:52:50.903341 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.679631 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"]
Nov 28 17:52:52 crc kubenswrapper[4909]: E1128 17:52:52.680640 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="pull"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680675 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="pull"
Nov 28 17:52:52 crc kubenswrapper[4909]: E1128 17:52:52.680713 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="util"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680721 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="util"
Nov 28 17:52:52 crc kubenswrapper[4909]: E1128 17:52:52.680732 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680739 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon"
Nov 28 17:52:52 crc kubenswrapper[4909]: E1128 17:52:52.680748 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="extract"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680757 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="extract"
Nov 28 17:52:52 crc kubenswrapper[4909]: E1128 17:52:52.680781 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon-log"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680788 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon-log"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.680995 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b31728fc-2e37-4b7c-9dd4-3d41a22bc00d" containerName="extract"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.681008 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon-log"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.681022 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9da4a21-03e8-4cd7-84b7-f64ee73f596b" containerName="horizon"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.681794 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.683726 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.686566 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-hf47c"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.686931 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.697227 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.750642 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blnqg\" (UniqueName: \"kubernetes.io/projected/a5271a09-d325-4edf-838a-52dd9a124eba-kube-api-access-blnqg\") pod \"obo-prometheus-operator-668cf9dfbb-q8sth\" (UID: \"a5271a09-d325-4edf-838a-52dd9a124eba\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.803268 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.804910 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.806643 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.806866 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-z2c52"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.816122 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.817421 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.826458 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.836467 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.854928 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blnqg\" (UniqueName: \"kubernetes.io/projected/a5271a09-d325-4edf-838a-52dd9a124eba-kube-api-access-blnqg\") pod \"obo-prometheus-operator-668cf9dfbb-q8sth\" (UID: \"a5271a09-d325-4edf-838a-52dd9a124eba\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.892624 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blnqg\" (UniqueName: \"kubernetes.io/projected/a5271a09-d325-4edf-838a-52dd9a124eba-kube-api-access-blnqg\") pod \"obo-prometheus-operator-668cf9dfbb-q8sth\" (UID: \"a5271a09-d325-4edf-838a-52dd9a124eba\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.957997 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.958054 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.958195 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.958286 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.991317 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hzmm9"]
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.992714 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.994959 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-6b29x"
Nov 28 17:52:52 crc kubenswrapper[4909]: I1128 17:52:52.995827 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.005158 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.023994 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hzmm9"]
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.060200 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.060250 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.060338 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.060390 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.082520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.088133 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.093169 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/981108b4-6d9a-421c-8e5b-8ae7d1643fc4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w\" (UID: \"981108b4-6d9a-421c-8e5b-8ae7d1643fc4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.097098 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51f0f5a0-6592-4651-9483-cde6c0fd1afe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd\" (UID: \"51f0f5a0-6592-4651-9483-cde6c0fd1afe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.124806 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-xk4qq"]
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.127274 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.129320 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.131069 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-7b7rr"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.136597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-xk4qq"]
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.142089 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.168773 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zl76\" (UniqueName: \"kubernetes.io/projected/7a779e0e-c999-4317-8315-a950ae29cd23-kube-api-access-7zl76\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.168813 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a779e0e-c999-4317-8315-a950ae29cd23-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.271633 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcp2t\" (UniqueName: \"kubernetes.io/projected/68065799-9c6f-4fac-9aa3-2dfc97bfca78-kube-api-access-vcp2t\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.271873 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/68065799-9c6f-4fac-9aa3-2dfc97bfca78-openshift-service-ca\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.271934 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zl76\" (UniqueName: \"kubernetes.io/projected/7a779e0e-c999-4317-8315-a950ae29cd23-kube-api-access-7zl76\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.271973 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a779e0e-c999-4317-8315-a950ae29cd23-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.293806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zl76\" (UniqueName: \"kubernetes.io/projected/7a779e0e-c999-4317-8315-a950ae29cd23-kube-api-access-7zl76\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.295264 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a779e0e-c999-4317-8315-a950ae29cd23-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hzmm9\" (UID: \"7a779e0e-c999-4317-8315-a950ae29cd23\") " pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.314338 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.374543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcp2t\" (UniqueName: \"kubernetes.io/projected/68065799-9c6f-4fac-9aa3-2dfc97bfca78-kube-api-access-vcp2t\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.374736 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/68065799-9c6f-4fac-9aa3-2dfc97bfca78-openshift-service-ca\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.375816 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/68065799-9c6f-4fac-9aa3-2dfc97bfca78-openshift-service-ca\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.400336 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcp2t\" (UniqueName: \"kubernetes.io/projected/68065799-9c6f-4fac-9aa3-2dfc97bfca78-kube-api-access-vcp2t\") pod \"perses-operator-5446b9c989-xk4qq\" (UID: \"68065799-9c6f-4fac-9aa3-2dfc97bfca78\") " pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.597280 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"]
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.599619 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.614167 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"]
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.633941 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.783602 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.784101 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.784147 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szzs4\" (UniqueName: \"kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.892786 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.892857 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.892888 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szzs4\" (UniqueName: \"kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.894171 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.894457 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:53 crc kubenswrapper[4909]: I1128 17:52:53.944154 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szzs4\" (UniqueName: \"kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4\") pod \"certified-operators-cvq7c\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") " pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.221171 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.559012 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth"]
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.599757 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hzmm9"]
Nov 28 17:52:54 crc kubenswrapper[4909]: W1128 17:52:54.600098 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod981108b4_6d9a_421c_8e5b_8ae7d1643fc4.slice/crio-33d880b25a9b56cef5d0ec96264d86bc3cdf1e3bc16b56b876e0cb24bfd197fb WatchSource:0}: Error finding container 33d880b25a9b56cef5d0ec96264d86bc3cdf1e3bc16b56b876e0cb24bfd197fb: Status 404 returned error can't find the container with id 33d880b25a9b56cef5d0ec96264d86bc3cdf1e3bc16b56b876e0cb24bfd197fb
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.619517 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w"]
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.630322 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-xk4qq"]
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.672516 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd"]
Nov 28 17:52:54 crc kubenswrapper[4909]: I1128 17:52:54.918406 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"]
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.244514 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd" event={"ID":"51f0f5a0-6592-4651-9483-cde6c0fd1afe","Type":"ContainerStarted","Data":"c0dbe7166b92e993f2e7286f9a2c786409dc200e91a3370371d49dbe542b7f50"}
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.246202 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth" event={"ID":"a5271a09-d325-4edf-838a-52dd9a124eba","Type":"ContainerStarted","Data":"c3a70be110ee58244733f0e14463020ff87baef135377d6750c5012baab7bba6"}
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.247473 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9" event={"ID":"7a779e0e-c999-4317-8315-a950ae29cd23","Type":"ContainerStarted","Data":"c94475449ebbcd11db80efa48d8b9ef0af886799190fb29fd8dd5bc0ac21a86e"}
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.250085 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerStarted","Data":"d4567035a6f688deb13483cbed3fc8d9602f5ac93f8aa7b88af7ea23ec1acd5a"}
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.252955 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w" event={"ID":"981108b4-6d9a-421c-8e5b-8ae7d1643fc4","Type":"ContainerStarted","Data":"33d880b25a9b56cef5d0ec96264d86bc3cdf1e3bc16b56b876e0cb24bfd197fb"}
Nov 28 17:52:55 crc kubenswrapper[4909]: I1128 17:52:55.255425 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-xk4qq" event={"ID":"68065799-9c6f-4fac-9aa3-2dfc97bfca78","Type":"ContainerStarted","Data":"3b74d50005e9004445241e0af81670c6154b3a95bd327c120fe2f8d1bfc0b09e"}
Nov 28 17:52:56 crc kubenswrapper[4909]: I1128 17:52:56.266508 4909 generic.go:334] "Generic (PLEG): container finished" podID="f6567f9a-5cde-4593-a172-292e96363f7b" containerID="b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584" exitCode=0
Nov 28 17:52:56 crc kubenswrapper[4909]: I1128 17:52:56.266605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerDied","Data":"b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584"}
Nov 28 17:53:02 crc kubenswrapper[4909]: I1128 17:53:02.902231 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:53:02 crc kubenswrapper[4909]: E1128 17:53:02.903038 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.051237 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2n9cc"]
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.062041 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-e5ef-account-create-update-rxlg6"]
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.070895 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2n9cc"]
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.079251 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-e5ef-account-create-update-rxlg6"]
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.913710 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87d51f30-31ed-490c-b758-43cdaf1d73d1" path="/var/lib/kubelet/pods/87d51f30-31ed-490c-b758-43cdaf1d73d1/volumes"
Nov 28 17:53:03 crc kubenswrapper[4909]: I1128 17:53:03.914475 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e11133f8-5269-401c-aa3d-3658264356f6" path="/var/lib/kubelet/pods/e11133f8-5269-401c-aa3d-3658264356f6/volumes"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.394096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerStarted","Data":"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.396618 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w" event={"ID":"981108b4-6d9a-421c-8e5b-8ae7d1643fc4","Type":"ContainerStarted","Data":"db38c5ebfb2cb19802aa4a16d1b763a3793234cfd71996dc25336179c3283e88"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.398625 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-xk4qq" event={"ID":"68065799-9c6f-4fac-9aa3-2dfc97bfca78","Type":"ContainerStarted","Data":"d342c6d19b6b4c0e0490feeff47484cbb408a1cf19ea4dc9cdfddea90ab718bd"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.398771 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.401175 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd" event={"ID":"51f0f5a0-6592-4651-9483-cde6c0fd1afe","Type":"ContainerStarted","Data":"dbc095feb8f8a576f250817357efbd716f3ecd6e6b6926b6e11ab1783c81ae85"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.403029 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth" event={"ID":"a5271a09-d325-4edf-838a-52dd9a124eba","Type":"ContainerStarted","Data":"21696f18425fd4e77f4ccce884f74f87b742693d331f333218d94fc8c804d488"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.405005 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9" event={"ID":"7a779e0e-c999-4317-8315-a950ae29cd23","Type":"ContainerStarted","Data":"d808640f41bfa4849a9822cc10b05220d095165dfd33b1375812a9b3b0fae3d1"}
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.405222 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.437410 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-xk4qq" podStartSLOduration=2.519312596 podStartE2EDuration="12.437391043s" podCreationTimestamp="2025-11-28 17:52:53 +0000 UTC" firstStartedPulling="2025-11-28 17:52:54.637498722 +0000 UTC m=+6157.034183246" lastFinishedPulling="2025-11-28 17:53:04.555577169 +0000 UTC m=+6166.952261693" observedRunningTime="2025-11-28 17:53:05.433217682 +0000 UTC m=+6167.829902216" watchObservedRunningTime="2025-11-28 17:53:05.437391043 +0000 UTC m=+6167.834075567"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.462024 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-q8sth" podStartSLOduration=3.517269191 podStartE2EDuration="13.462005207s" podCreationTimestamp="2025-11-28 17:52:52 +0000 UTC" firstStartedPulling="2025-11-28 17:52:54.610558395 +0000 UTC m=+6157.007242919" lastFinishedPulling="2025-11-28 17:53:04.555294411 +0000 UTC m=+6166.951978935" observedRunningTime="2025-11-28 17:53:05.45421625 +0000 UTC m=+6167.850900774" watchObservedRunningTime="2025-11-28 17:53:05.462005207 +0000 UTC m=+6167.858689731"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.483303 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.490942 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-hzmm9" podStartSLOduration=3.542812261 podStartE2EDuration="13.490920176s" podCreationTimestamp="2025-11-28 17:52:52 +0000 UTC" firstStartedPulling="2025-11-28 17:52:54.607766591 +0000 UTC m=+6157.004451115" lastFinishedPulling="2025-11-28 17:53:04.555874506 +0000 UTC m=+6166.952559030" observedRunningTime="2025-11-28 17:53:05.4854339 +0000 UTC m=+6167.882118444" watchObservedRunningTime="2025-11-28 17:53:05.490920176 +0000 UTC m=+6167.887604700"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.508628 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd" podStartSLOduration=3.704244083 podStartE2EDuration="13.508615137s" podCreationTimestamp="2025-11-28 17:52:52 +0000 UTC" firstStartedPulling="2025-11-28 17:52:54.700934418 +0000 UTC m=+6157.097618942" lastFinishedPulling="2025-11-28 17:53:04.505305482 +0000 UTC m=+6166.901989996" observedRunningTime="2025-11-28 17:53:05.503965603 +0000 UTC m=+6167.900650127" watchObservedRunningTime="2025-11-28 17:53:05.508615137 +0000 UTC m=+6167.905299661"
Nov 28 17:53:05 crc kubenswrapper[4909]: I1128 17:53:05.552495 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w" podStartSLOduration=3.632504526 podStartE2EDuration="13.552473603s" podCreationTimestamp="2025-11-28 17:52:52 +0000 UTC" firstStartedPulling="2025-11-28 17:52:54.610162105 +0000 UTC m=+6157.006846629" lastFinishedPulling="2025-11-28 17:53:04.530131182 +0000 UTC m=+6166.926815706" observedRunningTime="2025-11-28 17:53:05.538347907 +0000 UTC m=+6167.935032431" watchObservedRunningTime="2025-11-28 17:53:05.552473603 +0000 UTC m=+6167.949158207"
Nov 28 17:53:06 crc kubenswrapper[4909]: I1128 17:53:06.416045 4909 generic.go:334] "Generic (PLEG): container finished" podID="f6567f9a-5cde-4593-a172-292e96363f7b" containerID="72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196" exitCode=0
Nov 28 17:53:06 crc kubenswrapper[4909]: I1128 17:53:06.416172 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerDied","Data":"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"}
Nov 28 17:53:07 crc kubenswrapper[4909]: I1128 17:53:07.428853 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerStarted","Data":"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"}
Nov 28 17:53:07 crc kubenswrapper[4909]: I1128 17:53:07.456779 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cvq7c" podStartSLOduration=3.87342445 podStartE2EDuration="14.456758283s" podCreationTimestamp="2025-11-28 17:52:53 +0000 UTC" firstStartedPulling="2025-11-28 17:52:56.270632643 +0000 UTC m=+6158.667317167" lastFinishedPulling="2025-11-28 17:53:06.853966486 +0000 UTC m=+6169.250651000" observedRunningTime="2025-11-28 17:53:07.455077769 +0000 UTC m=+6169.851762293" watchObservedRunningTime="2025-11-28 17:53:07.456758283 +0000 UTC m=+6169.853442817"
Nov 28 17:53:12 crc kubenswrapper[4909]: I1128 17:53:12.038027 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-4pqhr"]
Nov 28 17:53:12 crc kubenswrapper[4909]: I1128 17:53:12.050445 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-4pqhr"]
Nov 28 17:53:13 crc kubenswrapper[4909]: I1128 17:53:13.636914 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-xk4qq"
Nov 28 17:53:13 crc kubenswrapper[4909]: I1128 17:53:13.915607 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="614d33fc-17b9-4a47-b85a-20a0dec18413" path="/var/lib/kubelet/pods/614d33fc-17b9-4a47-b85a-20a0dec18413/volumes"
Nov 28 17:53:14 crc kubenswrapper[4909]: I1128 17:53:14.222364 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:53:14 crc kubenswrapper[4909]: I1128 17:53:14.222404 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:53:14 crc kubenswrapper[4909]: I1128 17:53:14.272339 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:53:14 crc kubenswrapper[4909]: I1128 17:53:14.572834 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:53:14 crc kubenswrapper[4909]: I1128 17:53:14.621973 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"]
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.528164 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cvq7c" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="registry-server" containerID="cri-o://b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746" gracePeriod=2
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.906223 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.907122 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="917f1c58-6e8e-4503-b697-d69434d8622c" containerName="openstackclient" containerID="cri-o://702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e" gracePeriod=2
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.919422 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.979006 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 28 17:53:16 crc kubenswrapper[4909]: E1128 17:53:16.979450 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917f1c58-6e8e-4503-b697-d69434d8622c" containerName="openstackclient"
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.979462 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="917f1c58-6e8e-4503-b697-d69434d8622c" containerName="openstackclient"
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.979635 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="917f1c58-6e8e-4503-b697-d69434d8622c" containerName="openstackclient"
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.980346 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 17:53:16 crc kubenswrapper[4909]: I1128 17:53:16.995751 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.034222 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="917f1c58-6e8e-4503-b697-d69434d8622c" podUID="ddeeec31-082e-4934-a8e0-5fd8c5beaf81"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.100869 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config-secret\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.100908 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk5pm\" (UniqueName: \"kubernetes.io/projected/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-kube-api-access-qk5pm\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.100972 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.202428 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config-secret\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.202476 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk5pm\" (UniqueName: \"kubernetes.io/projected/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-kube-api-access-qk5pm\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.202562 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.203746 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.208439 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-openstack-config-secret\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.225335 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.226610 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.230958 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-j5fhn"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.258125 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk5pm\" (UniqueName: \"kubernetes.io/projected/ddeeec31-082e-4934-a8e0-5fd8c5beaf81-kube-api-access-qk5pm\") pod \"openstackclient\" (UID: \"ddeeec31-082e-4934-a8e0-5fd8c5beaf81\") " pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.280911 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.317802 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4qh4\" (UniqueName: \"kubernetes.io/projected/3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0-kube-api-access-g4qh4\") pod \"kube-state-metrics-0\" (UID: \"3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.327424 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.422948 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4qh4\" (UniqueName: \"kubernetes.io/projected/3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0-kube-api-access-g4qh4\") pod \"kube-state-metrics-0\" (UID: \"3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.462554 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4qh4\" (UniqueName: \"kubernetes.io/projected/3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0-kube-api-access-g4qh4\") pod \"kube-state-metrics-0\" (UID: \"3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.515588 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.596527 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cvq7c"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.632004 4909 generic.go:334] "Generic (PLEG): container finished" podID="f6567f9a-5cde-4593-a172-292e96363f7b" containerID="b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746" exitCode=0
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.635266 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerDied","Data":"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"}
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.638517 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cvq7c" event={"ID":"f6567f9a-5cde-4593-a172-292e96363f7b","Type":"ContainerDied","Data":"d4567035a6f688deb13483cbed3fc8d9602f5ac93f8aa7b88af7ea23ec1acd5a"}
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.638621 4909 scope.go:117] "RemoveContainer" containerID="b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.753545 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities\") pod \"f6567f9a-5cde-4593-a172-292e96363f7b\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") "
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.753718 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szzs4\" (UniqueName: \"kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4\") pod \"f6567f9a-5cde-4593-a172-292e96363f7b\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") "
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.753782 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content\") pod \"f6567f9a-5cde-4593-a172-292e96363f7b\" (UID: \"f6567f9a-5cde-4593-a172-292e96363f7b\") "
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.761720 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities" (OuterVolumeSpecName: "utilities") pod "f6567f9a-5cde-4593-a172-292e96363f7b" (UID: "f6567f9a-5cde-4593-a172-292e96363f7b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.778812 4909 scope.go:117] "RemoveContainer" containerID="72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.805442 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4" (OuterVolumeSpecName: "kube-api-access-szzs4") pod "f6567f9a-5cde-4593-a172-292e96363f7b" (UID: "f6567f9a-5cde-4593-a172-292e96363f7b"). InnerVolumeSpecName "kube-api-access-szzs4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.822642 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6567f9a-5cde-4593-a172-292e96363f7b" (UID: "f6567f9a-5cde-4593-a172-292e96363f7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.857282 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.857312 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szzs4\" (UniqueName: \"kubernetes.io/projected/f6567f9a-5cde-4593-a172-292e96363f7b-kube-api-access-szzs4\") on node \"crc\" DevicePath \"\""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.857322 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6567f9a-5cde-4593-a172-292e96363f7b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.925550 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:53:17 crc kubenswrapper[4909]: E1128 17:53:17.927510 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 17:53:17 crc kubenswrapper[4909]: I1128 17:53:17.944713 4909 scope.go:117] "RemoveContainer" containerID="b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.121922 4909 scope.go:117] "RemoveContainer" containerID="b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.133091 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746\": container with ID starting with b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746 not found: ID does not exist" containerID="b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.133152 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746"} err="failed to get container status \"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746\": rpc error: code = NotFound desc = could not find container \"b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746\": container with ID starting with b224c8a0fe463dd6587e92dd323ea61e8f5d50a1055f438095fbc65ffc912746 not found: ID does not exist"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.133185 4909 scope.go:117] "RemoveContainer" containerID="72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.135797 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196\": container with ID starting with 72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196 not found: ID does not exist" containerID="72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.135834 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196"} err="failed to get container status \"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196\": rpc error: code = NotFound desc = could not find container \"72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196\": container with ID starting with 72ff729792836f29c983a7c559680e4a32a938929477a46021cbf377f5d2a196 not found: ID does not exist"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.135867 4909 scope.go:117] "RemoveContainer" containerID="b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584"
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.142805 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584\": container with ID starting with b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584 not found: ID does not exist" containerID="b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.142848 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584"} err="failed to get container status \"b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584\": rpc error: code = NotFound desc = could not find container \"b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584\": container with ID starting with b0323071ebfeabe260ee9c798fa05fc57d5f9eba5f0d41c2c19dcde824380584 not found: ID does not exist"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.289930 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.290483 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="extract-content"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.290498 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="extract-content"
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.290534 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="extract-utilities"
Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.290541 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="extract-utilities"
Nov 28 17:53:18 crc kubenswrapper[4909]: E1128 17:53:18.290571 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="registry-server"
Nov 28 17:53:18
crc kubenswrapper[4909]: I1128 17:53:18.290579 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="registry-server" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.290868 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" containerName="registry-server" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.295435 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.304596 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.304891 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.305173 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.305323 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.305456 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-wtbtz" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.319088 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.373803 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374086 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klqq4\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-kube-api-access-klqq4\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374192 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374229 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374251 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: 
\"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374293 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.374340 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.404955 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475493 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475551 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475580 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klqq4\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-kube-api-access-klqq4\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475669 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475702 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475727 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 
17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.475766 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.476198 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.492064 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.499948 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.499962 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.500284 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.501357 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.517814 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klqq4\" (UniqueName: \"kubernetes.io/projected/abb9ef1a-19b4-4dca-a532-ec15cb3625b1-kube-api-access-klqq4\") pod \"alertmanager-metric-storage-0\" (UID: \"abb9ef1a-19b4-4dca-a532-ec15cb3625b1\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.635477 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 17:53:18 crc kubenswrapper[4909]: W1128 17:53:18.644465 4909 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c15f4bf_b1ef_4b7b_b7b8_f3e9e26eccd0.slice/crio-63a52ba7b5bff44d19a4d14861b68e447455910a819d3f21b8aa2c543f23b3c3 WatchSource:0}: Error finding container 63a52ba7b5bff44d19a4d14861b68e447455910a819d3f21b8aa2c543f23b3c3: Status 404 returned error can't find the container with id 63a52ba7b5bff44d19a4d14861b68e447455910a819d3f21b8aa2c543f23b3c3 Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.648797 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.666940 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cvq7c" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.671097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"ddeeec31-082e-4934-a8e0-5fd8c5beaf81","Type":"ContainerStarted","Data":"251ab433d44ff671640b47e38506d897e4dd7e5fc86af51f6620f9bf7d4fea8c"} Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.779957 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.812247 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cvq7c"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.979891 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.982685 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.986781 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.986937 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-ppvzv" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.988329 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.988515 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.988653 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.993574 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:53:18 crc kubenswrapper[4909]: I1128 17:53:18.993652 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097021 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097069 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097117 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6bks\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-kube-api-access-c6bks\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097210 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097231 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097247 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097288 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.097336 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.165752 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199257 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199353 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199439 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199482 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199519 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6bks\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-kube-api-access-c6bks\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199648 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.199691 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.201048 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.204158 4909 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.204192 4909 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4a900cce16a12f41041ef8f5e5d66c413a565ff5fd3dd088e5da341fb057f347/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.204918 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.205428 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.205630 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.206321 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-config\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.212405 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.227401 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6bks\" (UniqueName: \"kubernetes.io/projected/873236a5-5fa6-43c8-a44f-cbb590ff9bdc-kube-api-access-c6bks\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.265635 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d18ea77-ecd9-41ae-9c45-bd8ed0e89237\") pod \"prometheus-metric-storage-0\" (UID: \"873236a5-5fa6-43c8-a44f-cbb590ff9bdc\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.270586 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.275730 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="917f1c58-6e8e-4503-b697-d69434d8622c" podUID="ddeeec31-082e-4934-a8e0-5fd8c5beaf81" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.340195 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.404094 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config\") pod \"917f1c58-6e8e-4503-b697-d69434d8622c\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.404413 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kftc4\" (UniqueName: \"kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4\") pod \"917f1c58-6e8e-4503-b697-d69434d8622c\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.404449 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret\") pod \"917f1c58-6e8e-4503-b697-d69434d8622c\" (UID: \"917f1c58-6e8e-4503-b697-d69434d8622c\") " Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.411459 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4" (OuterVolumeSpecName: "kube-api-access-kftc4") pod "917f1c58-6e8e-4503-b697-d69434d8622c" (UID: "917f1c58-6e8e-4503-b697-d69434d8622c"). InnerVolumeSpecName "kube-api-access-kftc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.458043 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "917f1c58-6e8e-4503-b697-d69434d8622c" (UID: "917f1c58-6e8e-4503-b697-d69434d8622c"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.487161 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "917f1c58-6e8e-4503-b697-d69434d8622c" (UID: "917f1c58-6e8e-4503-b697-d69434d8622c"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.508113 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kftc4\" (UniqueName: \"kubernetes.io/projected/917f1c58-6e8e-4503-b697-d69434d8622c-kube-api-access-kftc4\") on node \"crc\" DevicePath \"\"" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.508137 4909 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.508147 4909 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/917f1c58-6e8e-4503-b697-d69434d8622c-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.683713 4909 scope.go:117] "RemoveContainer" containerID="702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.683713 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.683609 4909 generic.go:334] "Generic (PLEG): container finished" podID="917f1c58-6e8e-4503-b697-d69434d8622c" containerID="702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e" exitCode=137 Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.688463 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="917f1c58-6e8e-4503-b697-d69434d8622c" podUID="ddeeec31-082e-4934-a8e0-5fd8c5beaf81" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.689982 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"ddeeec31-082e-4934-a8e0-5fd8c5beaf81","Type":"ContainerStarted","Data":"032524a5151f16b90280438e5237a886abc5caa4aa53f1a2fa742075bc595fa7"} Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.711352 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0","Type":"ContainerStarted","Data":"f1dcae84a329c9c3820a2cf6caa18218b0310fdaa2785a86cb262b12945c2d4f"} Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.711402 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0","Type":"ContainerStarted","Data":"63a52ba7b5bff44d19a4d14861b68e447455910a819d3f21b8aa2c543f23b3c3"} Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.712259 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.718287 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"abb9ef1a-19b4-4dca-a532-ec15cb3625b1","Type":"ContainerStarted","Data":"dbc90c04573df4c67f40f1fae373e9ab8a5c1629452e344fdc888169f12a403d"} Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.718516 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.718500841 podStartE2EDuration="3.718500841s" podCreationTimestamp="2025-11-28 17:53:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:53:19.71245651 +0000 UTC m=+6182.109141034" watchObservedRunningTime="2025-11-28 17:53:19.718500841 +0000 UTC m=+6182.115185365" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.739529 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.353313122 podStartE2EDuration="2.739509429s" podCreationTimestamp="2025-11-28 17:53:17 +0000 UTC" firstStartedPulling="2025-11-28 17:53:18.702369725 +0000 UTC m=+6181.099054249" lastFinishedPulling="2025-11-28 17:53:19.088566032 +0000 UTC m=+6181.485250556" observedRunningTime="2025-11-28 17:53:19.73202899 +0000 UTC m=+6182.128713514" watchObservedRunningTime="2025-11-28 17:53:19.739509429 +0000 UTC m=+6182.136193943" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.760186 4909 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="917f1c58-6e8e-4503-b697-d69434d8622c" podUID="ddeeec31-082e-4934-a8e0-5fd8c5beaf81" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.768283 4909 scope.go:117] "RemoveContainer" containerID="702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e" Nov 28 17:53:19 crc kubenswrapper[4909]: E1128 17:53:19.768715 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e\": container with ID starting with 702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e not found: ID does not exist" containerID="702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.768757 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e"} err="failed to get container status \"702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e\": rpc error: code = NotFound desc = could not find container \"702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e\": container with ID starting with 702919c982ad62faf4b76e95d433f71f799d1238591069039fd5683c4198668e not found: ID does not exist" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.872182 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:53:19 crc kubenswrapper[4909]: E1128 17:53:19.877386 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod917f1c58_6e8e_4503_b697_d69434d8622c.slice/crio-1c7a8c9186cf735f903a1fb880d8bdfc2aba875870c5515eb98bd315dcff8d66\": RecentStats: unable to find data in memory cache]" Nov 28 17:53:19 crc kubenswrapper[4909]: W1128 17:53:19.915897 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod873236a5_5fa6_43c8_a44f_cbb590ff9bdc.slice/crio-e42078882e75c0d9efb10c768408c92def5e7156eb95794229db1201324d65e9 WatchSource:0}: Error finding container e42078882e75c0d9efb10c768408c92def5e7156eb95794229db1201324d65e9: Status 404 returned error can't find the container with id e42078882e75c0d9efb10c768408c92def5e7156eb95794229db1201324d65e9 Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.934990 4909 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="917f1c58-6e8e-4503-b697-d69434d8622c" path="/var/lib/kubelet/pods/917f1c58-6e8e-4503-b697-d69434d8622c/volumes" Nov 28 17:53:19 crc kubenswrapper[4909]: I1128 17:53:19.935511 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6567f9a-5cde-4593-a172-292e96363f7b" path="/var/lib/kubelet/pods/f6567f9a-5cde-4593-a172-292e96363f7b/volumes" Nov 28 17:53:20 crc kubenswrapper[4909]: I1128 17:53:20.727899 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerStarted","Data":"e42078882e75c0d9efb10c768408c92def5e7156eb95794229db1201324d65e9"} Nov 28 17:53:25 crc kubenswrapper[4909]: I1128 17:53:25.772958 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerStarted","Data":"2e42ea10b55b13da9ff44dbab41754629e8ef1b06334fa823ad3c8241f61e697"} Nov 28 17:53:25 crc kubenswrapper[4909]: I1128 17:53:25.775250 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"abb9ef1a-19b4-4dca-a532-ec15cb3625b1","Type":"ContainerStarted","Data":"9746d57b6e0765cc1951a9232becf265503fbaa1034fbdd207bd9fa054ade29b"} Nov 28 17:53:27 crc kubenswrapper[4909]: I1128 17:53:27.521299 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 17:53:30 crc kubenswrapper[4909]: I1128 17:53:30.901547 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:53:30 crc kubenswrapper[4909]: E1128 17:53:30.902698 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:53:31 crc kubenswrapper[4909]: I1128 17:53:31.856225 4909 generic.go:334] "Generic (PLEG): container finished" podID="873236a5-5fa6-43c8-a44f-cbb590ff9bdc" containerID="2e42ea10b55b13da9ff44dbab41754629e8ef1b06334fa823ad3c8241f61e697" exitCode=0 Nov 28 17:53:31 crc kubenswrapper[4909]: I1128 17:53:31.856279 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerDied","Data":"2e42ea10b55b13da9ff44dbab41754629e8ef1b06334fa823ad3c8241f61e697"} Nov 28 17:53:33 crc kubenswrapper[4909]: I1128 17:53:33.886295 4909 generic.go:334] "Generic (PLEG): container finished" podID="abb9ef1a-19b4-4dca-a532-ec15cb3625b1" containerID="9746d57b6e0765cc1951a9232becf265503fbaa1034fbdd207bd9fa054ade29b" exitCode=0 Nov 28 17:53:33 crc kubenswrapper[4909]: I1128 17:53:33.886418 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"abb9ef1a-19b4-4dca-a532-ec15cb3625b1","Type":"ContainerDied","Data":"9746d57b6e0765cc1951a9232becf265503fbaa1034fbdd207bd9fa054ade29b"} Nov 28 17:53:34 crc kubenswrapper[4909]: I1128 17:53:34.613015 4909 scope.go:117] "RemoveContainer" containerID="0b3308e20a8fe2c349170f13d9c193e690b8b406744a2fbabef4ba64c37e82fa" Nov 28 17:53:34 crc kubenswrapper[4909]: 
I1128 17:53:34.644534 4909 scope.go:117] "RemoveContainer" containerID="e40dceaab2b0f4da20494408ea54519ae2c665306e73fedcfbc8428dd8372633" Nov 28 17:53:34 crc kubenswrapper[4909]: I1128 17:53:34.692876 4909 scope.go:117] "RemoveContainer" containerID="43def0371b58b8f0cb0c33aac74483346a5f2f1c01c81d3509584522fcb06d36" Nov 28 17:53:39 crc kubenswrapper[4909]: I1128 17:53:39.983582 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"abb9ef1a-19b4-4dca-a532-ec15cb3625b1","Type":"ContainerStarted","Data":"46e71060acfc4bcb0e9168b7798631ce49cb4ce4190b325238b8fb801c01d2a0"} Nov 28 17:53:40 crc kubenswrapper[4909]: I1128 17:53:40.001994 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerStarted","Data":"7ab5c93c0d85bc4871dc9027a1f7e2ba7f7f7dff57b9db4326bc6ac95790d009"} Nov 28 17:53:41 crc kubenswrapper[4909]: I1128 17:53:41.901478 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:53:41 crc kubenswrapper[4909]: E1128 17:53:41.902240 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:53:42 crc kubenswrapper[4909]: I1128 17:53:42.034830 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-vq4q8"] Nov 28 17:53:42 crc kubenswrapper[4909]: I1128 17:53:42.047060 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-d22a-account-create-update-4vb8l"] Nov 28 17:53:42 crc kubenswrapper[4909]: I1128 17:53:42.058427 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-vq4q8"] Nov 28 17:53:42 crc kubenswrapper[4909]: I1128 17:53:42.066324 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-d22a-account-create-update-4vb8l"] Nov 28 17:53:43 crc kubenswrapper[4909]: I1128 17:53:43.913677 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68825d63-727c-4c82-a876-6b85b0224061" path="/var/lib/kubelet/pods/68825d63-727c-4c82-a876-6b85b0224061/volumes" Nov 28 17:53:43 crc kubenswrapper[4909]: I1128 17:53:43.914901 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80c89465-bce9-4402-98a8-11da00a3cb88" path="/var/lib/kubelet/pods/80c89465-bce9-4402-98a8-11da00a3cb88/volumes" Nov 28 17:53:44 crc kubenswrapper[4909]: I1128 17:53:44.049101 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"abb9ef1a-19b4-4dca-a532-ec15cb3625b1","Type":"ContainerStarted","Data":"d441cc08b794f9f02f5c1c2296bb25cbe1c20db794d565872d787a8bba7b0ee4"} Nov 28 17:53:44 crc kubenswrapper[4909]: I1128 17:53:44.050763 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:44 crc kubenswrapper[4909]: I1128 17:53:44.053341 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Nov 28 17:53:44 crc kubenswrapper[4909]: I1128 17:53:44.055575 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerStarted","Data":"0b4b5a7b4010a387b1556c1d6dbd494666e0785e9283576ab356a6115d822130"} Nov 28 17:53:44 crc kubenswrapper[4909]: I1128 17:53:44.073252 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=5.63951876 podStartE2EDuration="26.07322949s" podCreationTimestamp="2025-11-28 17:53:18 +0000 UTC" firstStartedPulling="2025-11-28 17:53:19.185350936 +0000 UTC m=+6181.582035460" lastFinishedPulling="2025-11-28 17:53:39.619061666 +0000 UTC m=+6202.015746190" observedRunningTime="2025-11-28 17:53:44.068804143 +0000 UTC m=+6206.465488687" watchObservedRunningTime="2025-11-28 17:53:44.07322949 +0000 UTC m=+6206.469914024" Nov 28 17:53:47 crc kubenswrapper[4909]: I1128 17:53:47.050624 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-2vp5l"] Nov 28 17:53:47 crc kubenswrapper[4909]: I1128 17:53:47.061059 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-2vp5l"] Nov 28 17:53:47 crc kubenswrapper[4909]: I1128 17:53:47.091131 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"873236a5-5fa6-43c8-a44f-cbb590ff9bdc","Type":"ContainerStarted","Data":"980cfdd381df63abf2e3855a036929857f5af68ba339f6e8e17c3674f7d06571"} Nov 28 17:53:47 crc kubenswrapper[4909]: I1128 17:53:47.132087 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.5411660940000003 podStartE2EDuration="30.132070398s" podCreationTimestamp="2025-11-28 17:53:17 +0000 UTC" firstStartedPulling="2025-11-28 17:53:19.96481429 +0000 UTC m=+6182.361498804" lastFinishedPulling="2025-11-28 17:53:46.555718584 +0000 UTC m=+6208.952403108" observedRunningTime="2025-11-28 17:53:47.118798455 +0000 UTC m=+6209.515483009" watchObservedRunningTime="2025-11-28 17:53:47.132070398 +0000 UTC m=+6209.528754912" Nov 28 17:53:47 crc kubenswrapper[4909]: I1128 17:53:47.917221 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f35bbae0-db18-4b8a-924e-18e33ad585e2" path="/var/lib/kubelet/pods/f35bbae0-db18-4b8a-924e-18e33ad585e2/volumes" Nov 28 17:53:49 crc kubenswrapper[4909]: I1128 17:53:49.341142 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:49 crc kubenswrapper[4909]: I1128 17:53:49.341471 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:49 crc kubenswrapper[4909]: I1128 17:53:49.346931 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:50 crc kubenswrapper[4909]: I1128 17:53:50.122152 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.050855 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.053630 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.056902 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.056750 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.066417 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.173632 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.173804 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.173839 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.173932 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.174016 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzrvk\" (UniqueName: \"kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.174089 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.174226 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.285860 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286130 4909 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286149 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286195 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286236 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzrvk\" (UniqueName: \"kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286271 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.286335 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.288337 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.288850 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.295408 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.295600 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.296694 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.318818 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.332423 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzrvk\" (UniqueName: \"kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk\") pod \"ceilometer-0\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.374611 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:53:51 crc kubenswrapper[4909]: I1128 17:53:51.883997 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:53:52 crc kubenswrapper[4909]: I1128 17:53:52.138799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerStarted","Data":"53420b20488737dc9fe1cee90b10767aaa2ebb3e4e1fae696eaa096b36cbba8f"} Nov 28 17:53:52 crc kubenswrapper[4909]: I1128 17:53:52.901933 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:53:52 crc kubenswrapper[4909]: E1128 17:53:52.902765 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:53:53 crc kubenswrapper[4909]: I1128 17:53:53.150241 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerStarted","Data":"fe37426916ad8394a9617f80a472e10ff30517be0673103e90057e4201cc6ea2"} Nov 28 17:53:54 crc kubenswrapper[4909]: I1128 17:53:54.164198 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerStarted","Data":"a780c085997cd1aa0eccc4d286a160c3f03ffac162e308f414edd3477f686e4c"} Nov 28 17:53:55 crc kubenswrapper[4909]: I1128 17:53:55.173400 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerStarted","Data":"970d171d3a3dbe0a8ecf220b947ae3ee654529eff77f94b2ee78781056ae20c8"} Nov 28 17:53:57 crc kubenswrapper[4909]: I1128 17:53:57.201329 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerStarted","Data":"5b1a47c7837ffcbec21488060cba0f1296cdbb9be8601670de3ba04e6f0608dd"} Nov 28 17:53:57 crc kubenswrapper[4909]: I1128 17:53:57.203786 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/ceilometer-0" Nov 28 17:53:57 crc kubenswrapper[4909]: I1128 17:53:57.224486 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.756613819 podStartE2EDuration="6.224470968s" podCreationTimestamp="2025-11-28 17:53:51 +0000 UTC" firstStartedPulling="2025-11-28 17:53:51.889266249 +0000 UTC m=+6214.285950773" lastFinishedPulling="2025-11-28 17:53:56.357123398 +0000 UTC m=+6218.753807922" observedRunningTime="2025-11-28 17:53:57.222750323 +0000 UTC m=+6219.619434867" watchObservedRunningTime="2025-11-28 17:53:57.224470968 +0000 UTC m=+6219.621155492" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.053986 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-njpg8"] Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.055982 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.073779 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-njpg8"] Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.145115 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcz65\" (UniqueName: \"kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.145219 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.155578 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-f20d-account-create-update-dgwgg"] Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.157239 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.159468 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.175645 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-f20d-account-create-update-dgwgg"] Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.248034 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.248215 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kchv\" (UniqueName: \"kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.248258 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcz65\" (UniqueName: \"kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.248322 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.249069 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.279342 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcz65\" (UniqueName: \"kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65\") pod \"aodh-db-create-njpg8\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.349985 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kchv\" (UniqueName: \"kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.350127 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: 
\"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.350841 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.366259 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kchv\" (UniqueName: \"kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv\") pod \"aodh-f20d-account-create-update-dgwgg\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.372955 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.473441 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:02 crc kubenswrapper[4909]: W1128 17:54:02.851629 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod26c39cf0_5737_49da_bf23_62236e1b6926.slice/crio-8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d WatchSource:0}: Error finding container 8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d: Status 404 returned error can't find the container with id 8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d Nov 28 17:54:02 crc kubenswrapper[4909]: I1128 17:54:02.859316 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-njpg8"] Nov 28 17:54:03 crc kubenswrapper[4909]: W1128 17:54:03.009795 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69754b0d_df57_4e81_b969_7b484861990c.slice/crio-d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0 WatchSource:0}: Error finding container d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0: Status 404 returned error can't find the container with id d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0 Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.011214 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-f20d-account-create-update-dgwgg"] Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.266777 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-njpg8" event={"ID":"26c39cf0-5737-49da-bf23-62236e1b6926","Type":"ContainerStarted","Data":"238214b8a53273a7f9f765651f3f44817c7ed73b37059b05d3566f86766ef766"} Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.266835 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-njpg8" event={"ID":"26c39cf0-5737-49da-bf23-62236e1b6926","Type":"ContainerStarted","Data":"8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d"} Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.275095 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-f20d-account-create-update-dgwgg" 
event={"ID":"69754b0d-df57-4e81-b969-7b484861990c","Type":"ContainerStarted","Data":"77baa6b7715e3c053f1badb04f6ca480175cafdc7e941ad36f4ceffd0dc67a71"} Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.275131 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-f20d-account-create-update-dgwgg" event={"ID":"69754b0d-df57-4e81-b969-7b484861990c","Type":"ContainerStarted","Data":"d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0"} Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.290052 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-njpg8" podStartSLOduration=1.290030626 podStartE2EDuration="1.290030626s" podCreationTimestamp="2025-11-28 17:54:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:54:03.281063367 +0000 UTC m=+6225.677747891" watchObservedRunningTime="2025-11-28 17:54:03.290030626 +0000 UTC m=+6225.686715150" Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.317532 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-f20d-account-create-update-dgwgg" podStartSLOduration=1.317508386 podStartE2EDuration="1.317508386s" podCreationTimestamp="2025-11-28 17:54:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:54:03.305462526 +0000 UTC m=+6225.702147050" watchObservedRunningTime="2025-11-28 17:54:03.317508386 +0000 UTC m=+6225.714192910" Nov 28 17:54:03 crc kubenswrapper[4909]: I1128 17:54:03.902297 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:54:03 crc kubenswrapper[4909]: E1128 17:54:03.902915 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:54:04 crc kubenswrapper[4909]: I1128 17:54:04.325002 4909 generic.go:334] "Generic (PLEG): container finished" podID="26c39cf0-5737-49da-bf23-62236e1b6926" containerID="238214b8a53273a7f9f765651f3f44817c7ed73b37059b05d3566f86766ef766" exitCode=0 Nov 28 17:54:04 crc kubenswrapper[4909]: I1128 17:54:04.325105 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-njpg8" event={"ID":"26c39cf0-5737-49da-bf23-62236e1b6926","Type":"ContainerDied","Data":"238214b8a53273a7f9f765651f3f44817c7ed73b37059b05d3566f86766ef766"} Nov 28 17:54:04 crc kubenswrapper[4909]: I1128 17:54:04.344464 4909 generic.go:334] "Generic (PLEG): container finished" podID="69754b0d-df57-4e81-b969-7b484861990c" containerID="77baa6b7715e3c053f1badb04f6ca480175cafdc7e941ad36f4ceffd0dc67a71" exitCode=0 Nov 28 17:54:04 crc kubenswrapper[4909]: I1128 17:54:04.344509 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-f20d-account-create-update-dgwgg" event={"ID":"69754b0d-df57-4e81-b969-7b484861990c","Type":"ContainerDied","Data":"77baa6b7715e3c053f1badb04f6ca480175cafdc7e941ad36f4ceffd0dc67a71"} Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.914066 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.923145 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.943265 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcz65\" (UniqueName: \"kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65\") pod \"26c39cf0-5737-49da-bf23-62236e1b6926\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.943351 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts\") pod \"69754b0d-df57-4e81-b969-7b484861990c\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.943390 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts\") pod \"26c39cf0-5737-49da-bf23-62236e1b6926\" (UID: \"26c39cf0-5737-49da-bf23-62236e1b6926\") " Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.943494 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kchv\" (UniqueName: \"kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv\") pod \"69754b0d-df57-4e81-b969-7b484861990c\" (UID: \"69754b0d-df57-4e81-b969-7b484861990c\") " Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.943903 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "69754b0d-df57-4e81-b969-7b484861990c" (UID: "69754b0d-df57-4e81-b969-7b484861990c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.944359 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69754b0d-df57-4e81-b969-7b484861990c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.944396 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "26c39cf0-5737-49da-bf23-62236e1b6926" (UID: "26c39cf0-5737-49da-bf23-62236e1b6926"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.952428 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65" (OuterVolumeSpecName: "kube-api-access-kcz65") pod "26c39cf0-5737-49da-bf23-62236e1b6926" (UID: "26c39cf0-5737-49da-bf23-62236e1b6926"). InnerVolumeSpecName "kube-api-access-kcz65". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:05 crc kubenswrapper[4909]: I1128 17:54:05.961466 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv" (OuterVolumeSpecName: "kube-api-access-9kchv") pod "69754b0d-df57-4e81-b969-7b484861990c" (UID: "69754b0d-df57-4e81-b969-7b484861990c"). InnerVolumeSpecName "kube-api-access-9kchv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.046235 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcz65\" (UniqueName: \"kubernetes.io/projected/26c39cf0-5737-49da-bf23-62236e1b6926-kube-api-access-kcz65\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.046596 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26c39cf0-5737-49da-bf23-62236e1b6926-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.046606 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kchv\" (UniqueName: \"kubernetes.io/projected/69754b0d-df57-4e81-b969-7b484861990c-kube-api-access-9kchv\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.364914 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-f20d-account-create-update-dgwgg" event={"ID":"69754b0d-df57-4e81-b969-7b484861990c","Type":"ContainerDied","Data":"d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0"} Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.365132 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9bc3f1609f1574ae24c6c9a35e484e7e3a0a4762c4aae8ad3bdb25c874270e0" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.364988 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-f20d-account-create-update-dgwgg" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.367025 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-njpg8" event={"ID":"26c39cf0-5737-49da-bf23-62236e1b6926","Type":"ContainerDied","Data":"8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d"} Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.367067 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f240978f4be7a80eb60a48d2e10ef0a829e2d4e117636267bd95ece9413f20d" Nov 28 17:54:06 crc kubenswrapper[4909]: I1128 17:54:06.367112 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-njpg8" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.523422 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-lbjjd"] Nov 28 17:54:07 crc kubenswrapper[4909]: E1128 17:54:07.524260 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69754b0d-df57-4e81-b969-7b484861990c" containerName="mariadb-account-create-update" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.524272 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="69754b0d-df57-4e81-b969-7b484861990c" containerName="mariadb-account-create-update" Nov 28 17:54:07 crc kubenswrapper[4909]: E1128 17:54:07.524307 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26c39cf0-5737-49da-bf23-62236e1b6926" containerName="mariadb-database-create" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.524315 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="26c39cf0-5737-49da-bf23-62236e1b6926" containerName="mariadb-database-create" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.524528 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="26c39cf0-5737-49da-bf23-62236e1b6926" containerName="mariadb-database-create" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.524543 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="69754b0d-df57-4e81-b969-7b484861990c" containerName="mariadb-account-create-update" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.525372 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.528457 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.528544 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-2z7t7" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.528813 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.528901 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.539386 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-lbjjd"] Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.574993 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.575073 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mrbn\" (UniqueName: \"kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.575123 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle\") pod \"aodh-db-sync-lbjjd\" (UID: 
\"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.575145 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.683676 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.683814 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mrbn\" (UniqueName: \"kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.683900 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.683931 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.691334 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.728945 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.730224 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.732629 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mrbn\" (UniqueName: \"kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn\") pod \"aodh-db-sync-lbjjd\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:07 crc kubenswrapper[4909]: I1128 17:54:07.846426 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:08 crc kubenswrapper[4909]: I1128 17:54:08.344098 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-lbjjd"] Nov 28 17:54:08 crc kubenswrapper[4909]: I1128 17:54:08.386917 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lbjjd" event={"ID":"0404b029-383d-4300-a459-fe85d6e0509e","Type":"ContainerStarted","Data":"70368114d9d5fdf6a8909598f23a4a38ec8555dba00c91b828ba439343867dd7"} Nov 28 17:54:14 crc kubenswrapper[4909]: I1128 17:54:14.451710 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lbjjd" event={"ID":"0404b029-383d-4300-a459-fe85d6e0509e","Type":"ContainerStarted","Data":"466d211ad07f41ac1df05bb5a2741ee033fe079aa964f7938758abedc216b7fd"} Nov 28 17:54:14 crc kubenswrapper[4909]: I1128 17:54:14.467438 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-lbjjd" podStartSLOduration=2.577473104 podStartE2EDuration="7.467406724s" podCreationTimestamp="2025-11-28 17:54:07 +0000 UTC" firstStartedPulling="2025-11-28 17:54:08.349240478 +0000 UTC m=+6230.745925002" lastFinishedPulling="2025-11-28 17:54:13.239174098 +0000 UTC m=+6235.635858622" observedRunningTime="2025-11-28 17:54:14.467080955 +0000 UTC m=+6236.863765489" watchObservedRunningTime="2025-11-28 17:54:14.467406724 +0000 UTC m=+6236.864091288" Nov 28 17:54:16 crc kubenswrapper[4909]: I1128 17:54:16.482643 4909 generic.go:334] "Generic (PLEG): container finished" podID="0404b029-383d-4300-a459-fe85d6e0509e" containerID="466d211ad07f41ac1df05bb5a2741ee033fe079aa964f7938758abedc216b7fd" exitCode=0 Nov 28 17:54:16 crc kubenswrapper[4909]: I1128 17:54:16.482788 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lbjjd" event={"ID":"0404b029-383d-4300-a459-fe85d6e0509e","Type":"ContainerDied","Data":"466d211ad07f41ac1df05bb5a2741ee033fe079aa964f7938758abedc216b7fd"} Nov 28 17:54:17 crc kubenswrapper[4909]: I1128 17:54:17.902639 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:54:17 crc kubenswrapper[4909]: E1128 17:54:17.903436 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:54:17 crc kubenswrapper[4909]: I1128 17:54:17.999250 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.021712 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle\") pod \"0404b029-383d-4300-a459-fe85d6e0509e\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.021961 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data\") pod \"0404b029-383d-4300-a459-fe85d6e0509e\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.022241 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts\") pod \"0404b029-383d-4300-a459-fe85d6e0509e\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.022480 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mrbn\" (UniqueName: \"kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn\") pod \"0404b029-383d-4300-a459-fe85d6e0509e\" (UID: \"0404b029-383d-4300-a459-fe85d6e0509e\") " Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.060108 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts" (OuterVolumeSpecName: "scripts") pod "0404b029-383d-4300-a459-fe85d6e0509e" (UID: "0404b029-383d-4300-a459-fe85d6e0509e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.065503 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn" (OuterVolumeSpecName: "kube-api-access-9mrbn") pod "0404b029-383d-4300-a459-fe85d6e0509e" (UID: "0404b029-383d-4300-a459-fe85d6e0509e"). InnerVolumeSpecName "kube-api-access-9mrbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.072324 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0404b029-383d-4300-a459-fe85d6e0509e" (UID: "0404b029-383d-4300-a459-fe85d6e0509e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.114588 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data" (OuterVolumeSpecName: "config-data") pod "0404b029-383d-4300-a459-fe85d6e0509e" (UID: "0404b029-383d-4300-a459-fe85d6e0509e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.125830 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mrbn\" (UniqueName: \"kubernetes.io/projected/0404b029-383d-4300-a459-fe85d6e0509e-kube-api-access-9mrbn\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.125884 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.125897 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.125910 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0404b029-383d-4300-a459-fe85d6e0509e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.512974 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lbjjd" event={"ID":"0404b029-383d-4300-a459-fe85d6e0509e","Type":"ContainerDied","Data":"70368114d9d5fdf6a8909598f23a4a38ec8555dba00c91b828ba439343867dd7"} Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.513021 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70368114d9d5fdf6a8909598f23a4a38ec8555dba00c91b828ba439343867dd7" Nov 28 17:54:18 crc kubenswrapper[4909]: I1128 17:54:18.513041 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-lbjjd" Nov 28 17:54:21 crc kubenswrapper[4909]: I1128 17:54:21.386438 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.643682 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 28 17:54:22 crc kubenswrapper[4909]: E1128 17:54:22.644368 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0404b029-383d-4300-a459-fe85d6e0509e" containerName="aodh-db-sync" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.644383 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0404b029-383d-4300-a459-fe85d6e0509e" containerName="aodh-db-sync" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.644580 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0404b029-383d-4300-a459-fe85d6e0509e" containerName="aodh-db-sync" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.646390 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.649237 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.654550 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-2z7t7" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.655032 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.678976 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.829786 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-config-data\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.829839 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.830263 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nqzh\" (UniqueName: \"kubernetes.io/projected/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-kube-api-access-5nqzh\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.830321 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-scripts\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.932834 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-config-data\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.932885 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.932982 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nqzh\" (UniqueName: \"kubernetes.io/projected/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-kube-api-access-5nqzh\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.933030 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-scripts\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: 
I1128 17:54:22.939358 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-scripts\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.940102 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.945828 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-config-data\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.957169 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nqzh\" (UniqueName: \"kubernetes.io/projected/c0f248c2-fae0-4996-b2d1-3d19d18f6cc5-kube-api-access-5nqzh\") pod \"aodh-0\" (UID: \"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5\") " pod="openstack/aodh-0" Nov 28 17:54:22 crc kubenswrapper[4909]: I1128 17:54:22.967060 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 28 17:54:23 crc kubenswrapper[4909]: I1128 17:54:23.488836 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 28 17:54:23 crc kubenswrapper[4909]: I1128 17:54:23.570908 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5","Type":"ContainerStarted","Data":"f3c2adcf9eda0015d906a49ac58f2c464671554471d51394eb3c43c4ff26e080"} Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.238767 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.241781 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-central-agent" containerID="cri-o://fe37426916ad8394a9617f80a472e10ff30517be0673103e90057e4201cc6ea2" gracePeriod=30 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.241902 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="sg-core" containerID="cri-o://970d171d3a3dbe0a8ecf220b947ae3ee654529eff77f94b2ee78781056ae20c8" gracePeriod=30 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.241935 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="proxy-httpd" containerID="cri-o://5b1a47c7837ffcbec21488060cba0f1296cdbb9be8601670de3ba04e6f0608dd" gracePeriod=30 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.241956 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-notification-agent" containerID="cri-o://a780c085997cd1aa0eccc4d286a160c3f03ffac162e308f414edd3477f686e4c" gracePeriod=30 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.582761 4909 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/aodh-0" event={"ID":"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5","Type":"ContainerStarted","Data":"65891ded26c78f891cfb63b8717a12bd440caf3750e6fcb7687fdc03abfd220e"} Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.591327 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerID="5b1a47c7837ffcbec21488060cba0f1296cdbb9be8601670de3ba04e6f0608dd" exitCode=0 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.591383 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerID="970d171d3a3dbe0a8ecf220b947ae3ee654529eff77f94b2ee78781056ae20c8" exitCode=2 Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.591418 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerDied","Data":"5b1a47c7837ffcbec21488060cba0f1296cdbb9be8601670de3ba04e6f0608dd"} Nov 28 17:54:24 crc kubenswrapper[4909]: I1128 17:54:24.591475 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerDied","Data":"970d171d3a3dbe0a8ecf220b947ae3ee654529eff77f94b2ee78781056ae20c8"} Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.685947 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerID="a780c085997cd1aa0eccc4d286a160c3f03ffac162e308f414edd3477f686e4c" exitCode=0 Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.685995 4909 generic.go:334] "Generic (PLEG): container finished" podID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerID="fe37426916ad8394a9617f80a472e10ff30517be0673103e90057e4201cc6ea2" exitCode=0 Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.686016 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerDied","Data":"a780c085997cd1aa0eccc4d286a160c3f03ffac162e308f414edd3477f686e4c"} Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.686044 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerDied","Data":"fe37426916ad8394a9617f80a472e10ff30517be0673103e90057e4201cc6ea2"} Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.888324 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.902266 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.902387 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.902429 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzrvk\" (UniqueName: \"kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.902462 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.902528 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.903197 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.903358 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data\") pod \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\" (UID: \"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b\") " Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.904156 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.904401 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.907059 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts" (OuterVolumeSpecName: "scripts") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.911597 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk" (OuterVolumeSpecName: "kube-api-access-tzrvk") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "kube-api-access-tzrvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.946191 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:25 crc kubenswrapper[4909]: I1128 17:54:25.983591 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005756 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005788 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005824 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzrvk\" (UniqueName: \"kubernetes.io/projected/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-kube-api-access-tzrvk\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005835 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005843 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.005850 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.044959 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data" (OuterVolumeSpecName: "config-data") pod "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" (UID: "6d806d2c-d2e6-4d8a-ac60-243bdb4c246b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.108167 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.696991 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5","Type":"ContainerStarted","Data":"6854d05651f519a69f70324ccfa790eaea8ccea705eb7a408021939ea9e54fb4"} Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.699416 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d806d2c-d2e6-4d8a-ac60-243bdb4c246b","Type":"ContainerDied","Data":"53420b20488737dc9fe1cee90b10767aaa2ebb3e4e1fae696eaa096b36cbba8f"} Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.699464 4909 scope.go:117] "RemoveContainer" containerID="5b1a47c7837ffcbec21488060cba0f1296cdbb9be8601670de3ba04e6f0608dd" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.699628 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.744281 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.779920 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.794129 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:26 crc kubenswrapper[4909]: E1128 17:54:26.794642 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="sg-core" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.794678 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="sg-core" Nov 28 17:54:26 crc kubenswrapper[4909]: E1128 17:54:26.794693 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="proxy-httpd" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.794701 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="proxy-httpd" Nov 28 17:54:26 crc kubenswrapper[4909]: E1128 17:54:26.794721 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-notification-agent" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.794727 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-notification-agent" Nov 28 17:54:26 crc kubenswrapper[4909]: E1128 17:54:26.794745 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-central-agent" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.794751 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-central-agent" Nov 28 
17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.795066 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-notification-agent" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.795087 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="sg-core" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.795098 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="ceilometer-central-agent" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.795107 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" containerName="proxy-httpd" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.797154 4909 scope.go:117] "RemoveContainer" containerID="970d171d3a3dbe0a8ecf220b947ae3ee654529eff77f94b2ee78781056ae20c8" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.797238 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.799843 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.799862 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.819020 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823316 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823373 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823526 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823634 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823727 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0" Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823765 4909 
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.823844 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2lbj\" (UniqueName: \"kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.839941 4909 scope.go:117] "RemoveContainer" containerID="a780c085997cd1aa0eccc4d286a160c3f03ffac162e308f414edd3477f686e4c"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.872721 4909 scope.go:117] "RemoveContainer" containerID="fe37426916ad8394a9617f80a472e10ff30517be0673103e90057e4201cc6ea2"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926135 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926209 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926250 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2lbj\" (UniqueName: \"kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926369 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926397 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.926576 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.931389 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.932255 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.934338 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.934882 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.935301 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.936555 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:26 crc kubenswrapper[4909]: I1128 17:54:26.947372 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2lbj\" (UniqueName: \"kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj\") pod \"ceilometer-0\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " pod="openstack/ceilometer-0"
Nov 28 17:54:27 crc kubenswrapper[4909]: I1128 17:54:27.120416 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:54:27 crc kubenswrapper[4909]: I1128 17:54:27.914529 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d806d2c-d2e6-4d8a-ac60-243bdb4c246b" path="/var/lib/kubelet/pods/6d806d2c-d2e6-4d8a-ac60-243bdb4c246b/volumes" Nov 28 17:54:28 crc kubenswrapper[4909]: I1128 17:54:28.115866 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:54:28 crc kubenswrapper[4909]: I1128 17:54:28.745747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5","Type":"ContainerStarted","Data":"8caac4d7f393610e82b4ee8ea5ccd846e77cd5ac230b39329365935b87e7acba"} Nov 28 17:54:28 crc kubenswrapper[4909]: I1128 17:54:28.746577 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerStarted","Data":"b4c5c966e8c905d06e80db85935aedd3c2bb64aa1d3d7007dcf75cd88f0ba33e"} Nov 28 17:54:29 crc kubenswrapper[4909]: I1128 17:54:29.763860 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerStarted","Data":"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6"} Nov 28 17:54:30 crc kubenswrapper[4909]: I1128 17:54:30.777021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c0f248c2-fae0-4996-b2d1-3d19d18f6cc5","Type":"ContainerStarted","Data":"83a7ba5b548eb28d1f77bef0301fdce265be72f4b59c51c01ad24399aa556337"} Nov 28 17:54:30 crc kubenswrapper[4909]: I1128 17:54:30.779352 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerStarted","Data":"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab"} Nov 28 17:54:30 crc kubenswrapper[4909]: I1128 17:54:30.810563 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.321765085 podStartE2EDuration="8.810536746s" podCreationTimestamp="2025-11-28 17:54:22 +0000 UTC" firstStartedPulling="2025-11-28 17:54:23.4971056 +0000 UTC m=+6245.893790124" lastFinishedPulling="2025-11-28 17:54:29.985877261 +0000 UTC m=+6252.382561785" observedRunningTime="2025-11-28 17:54:30.803877159 +0000 UTC m=+6253.200561683" watchObservedRunningTime="2025-11-28 17:54:30.810536746 +0000 UTC m=+6253.207221280" Nov 28 17:54:30 crc kubenswrapper[4909]: I1128 17:54:30.901996 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:54:30 crc kubenswrapper[4909]: E1128 17:54:30.902361 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:54:31 crc kubenswrapper[4909]: I1128 17:54:31.793723 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerStarted","Data":"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b"} Nov 28 17:54:32 crc kubenswrapper[4909]: I1128 
Nov 28 17:54:32 crc kubenswrapper[4909]: I1128 17:54:32.808757 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 17:54:32 crc kubenswrapper[4909]: I1128 17:54:32.832082 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.542885505 podStartE2EDuration="6.832060893s" podCreationTimestamp="2025-11-28 17:54:26 +0000 UTC" firstStartedPulling="2025-11-28 17:54:28.137351703 +0000 UTC m=+6250.534036227" lastFinishedPulling="2025-11-28 17:54:32.426527061 +0000 UTC m=+6254.823211615" observedRunningTime="2025-11-28 17:54:32.824678447 +0000 UTC m=+6255.221362991" watchObservedRunningTime="2025-11-28 17:54:32.832060893 +0000 UTC m=+6255.228745417"
Nov 28 17:54:34 crc kubenswrapper[4909]: I1128 17:54:34.894467 4909 scope.go:117] "RemoveContainer" containerID="82cfeda0f7892f3fd3e81c1ffe3b3599a0185a6ef7d13388c7ad34860ae58818"
Nov 28 17:54:34 crc kubenswrapper[4909]: I1128 17:54:34.984266 4909 scope.go:117] "RemoveContainer" containerID="168caa014f44cb5b9c04bdf92347bcd35ab22cf92dafebb1b734eed08be00a23"
Nov 28 17:54:35 crc kubenswrapper[4909]: I1128 17:54:35.024400 4909 scope.go:117] "RemoveContainer" containerID="9abebcbda14a8084e514996dec7508ff98ad5142765c975ba661e3c2d6e6cee5"
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.339303 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-4ktws"]
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.341269 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-4ktws"
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.350841 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-4ktws"]
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.442128 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzfv8\" (UniqueName: \"kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws"
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.443035 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws"
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.447923 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-2f47-account-create-update-h849b"]
Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.449411 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2f47-account-create-update-h849b"
Need to start a new one" pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.456250 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.468003 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2f47-account-create-update-h849b"] Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.549774 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.549837 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzfv8\" (UniqueName: \"kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.551122 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.571343 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzfv8\" (UniqueName: \"kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8\") pod \"manila-db-create-4ktws\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " pod="openstack/manila-db-create-4ktws" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.652282 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.652618 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8vf5\" (UniqueName: \"kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.711244 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-4ktws" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.754297 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.754391 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8vf5\" (UniqueName: \"kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.755593 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:36 crc kubenswrapper[4909]: I1128 17:54:36.783520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8vf5\" (UniqueName: \"kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5\") pod \"manila-2f47-account-create-update-h849b\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.070054 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.223604 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-4ktws"] Nov 28 17:54:37 crc kubenswrapper[4909]: W1128 17:54:37.226702 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dc1d1d5_b59f_4586_b96b_5ca2d78644c0.slice/crio-afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d WatchSource:0}: Error finding container afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d: Status 404 returned error can't find the container with id afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d Nov 28 17:54:37 crc kubenswrapper[4909]: W1128 17:54:37.619053 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadf72195_c644_4f7d_9467_ade2c4096ed4.slice/crio-f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0 WatchSource:0}: Error finding container f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0: Status 404 returned error can't find the container with id f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0 Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.620515 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2f47-account-create-update-h849b"] Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.890708 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2f47-account-create-update-h849b" event={"ID":"adf72195-c644-4f7d-9467-ade2c4096ed4","Type":"ContainerStarted","Data":"f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0"} Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.892441 4909 generic.go:334] "Generic (PLEG): container finished" podID="5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" containerID="850aced73610555014cc55b6ac60b0032d3129421dde98b1f2629cda4fad678a" exitCode=0 Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.892465 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-4ktws" event={"ID":"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0","Type":"ContainerDied","Data":"850aced73610555014cc55b6ac60b0032d3129421dde98b1f2629cda4fad678a"} Nov 28 17:54:37 crc kubenswrapper[4909]: I1128 17:54:37.892481 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-4ktws" event={"ID":"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0","Type":"ContainerStarted","Data":"afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d"} Nov 28 17:54:38 crc kubenswrapper[4909]: I1128 17:54:38.915909 4909 generic.go:334] "Generic (PLEG): container finished" podID="adf72195-c644-4f7d-9467-ade2c4096ed4" containerID="6e77d54878a042e57b0f228ce8305299105e02f2a26a46fc0ca98007e54ec711" exitCode=0 Nov 28 17:54:38 crc kubenswrapper[4909]: I1128 17:54:38.915997 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2f47-account-create-update-h849b" event={"ID":"adf72195-c644-4f7d-9467-ade2c4096ed4","Type":"ContainerDied","Data":"6e77d54878a042e57b0f228ce8305299105e02f2a26a46fc0ca98007e54ec711"} Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.405361 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-4ktws" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.523003 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts\") pod \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.523144 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzfv8\" (UniqueName: \"kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8\") pod \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\" (UID: \"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0\") " Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.523877 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" (UID: "5dc1d1d5-b59f-4586-b96b-5ca2d78644c0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.530053 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8" (OuterVolumeSpecName: "kube-api-access-pzfv8") pod "5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" (UID: "5dc1d1d5-b59f-4586-b96b-5ca2d78644c0"). InnerVolumeSpecName "kube-api-access-pzfv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.626080 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.626124 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzfv8\" (UniqueName: \"kubernetes.io/projected/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0-kube-api-access-pzfv8\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.933966 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-4ktws" event={"ID":"5dc1d1d5-b59f-4586-b96b-5ca2d78644c0","Type":"ContainerDied","Data":"afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d"} Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.934014 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afe88caf3ab616bfe81ae80c5d144bbc1c844c682c58476f41cccd59fdaeda9d" Nov 28 17:54:39 crc kubenswrapper[4909]: I1128 17:54:39.933977 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-4ktws" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.328715 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.341700 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts\") pod \"adf72195-c644-4f7d-9467-ade2c4096ed4\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.341886 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8vf5\" (UniqueName: \"kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5\") pod \"adf72195-c644-4f7d-9467-ade2c4096ed4\" (UID: \"adf72195-c644-4f7d-9467-ade2c4096ed4\") " Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.342781 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "adf72195-c644-4f7d-9467-ade2c4096ed4" (UID: "adf72195-c644-4f7d-9467-ade2c4096ed4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.356463 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5" (OuterVolumeSpecName: "kube-api-access-d8vf5") pod "adf72195-c644-4f7d-9467-ade2c4096ed4" (UID: "adf72195-c644-4f7d-9467-ade2c4096ed4"). InnerVolumeSpecName "kube-api-access-d8vf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.450196 4909 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adf72195-c644-4f7d-9467-ade2c4096ed4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.450238 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8vf5\" (UniqueName: \"kubernetes.io/projected/adf72195-c644-4f7d-9467-ade2c4096ed4-kube-api-access-d8vf5\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.946284 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2f47-account-create-update-h849b" event={"ID":"adf72195-c644-4f7d-9467-ade2c4096ed4","Type":"ContainerDied","Data":"f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0"} Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.946326 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-2f47-account-create-update-h849b" Nov 28 17:54:40 crc kubenswrapper[4909]: I1128 17:54:40.946340 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f77f5acc32d9bc84526cf4443f9d9a74dbad5c1e7f87b9c58d98f4d9e3b18cc0" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.961851 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-5h9n9"] Nov 28 17:54:41 crc kubenswrapper[4909]: E1128 17:54:41.962627 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" containerName="mariadb-database-create" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.962641 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" containerName="mariadb-database-create" Nov 28 17:54:41 crc kubenswrapper[4909]: E1128 17:54:41.962682 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adf72195-c644-4f7d-9467-ade2c4096ed4" containerName="mariadb-account-create-update" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.962689 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="adf72195-c644-4f7d-9467-ade2c4096ed4" containerName="mariadb-account-create-update" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.962878 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="adf72195-c644-4f7d-9467-ade2c4096ed4" containerName="mariadb-account-create-update" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.962894 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" containerName="mariadb-database-create" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.963613 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.965607 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-nhlmz" Nov 28 17:54:41 crc kubenswrapper[4909]: I1128 17:54:41.966166 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.005258 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-5h9n9"] Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.089709 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.090140 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.090232 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsb7k\" (UniqueName: \"kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.090553 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.194233 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.194487 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.194625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsb7k\" (UniqueName: \"kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.194787 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle\") pod \"manila-db-sync-5h9n9\" (UID: 
\"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.200543 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.200784 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.201742 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.210223 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsb7k\" (UniqueName: \"kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k\") pod \"manila-db-sync-5h9n9\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:42 crc kubenswrapper[4909]: I1128 17:54:42.311947 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:43 crc kubenswrapper[4909]: I1128 17:54:43.017175 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-5h9n9"] Nov 28 17:54:43 crc kubenswrapper[4909]: W1128 17:54:43.019149 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5db555a_5000_4331_b1b1_6108f0a9fd17.slice/crio-a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51 WatchSource:0}: Error finding container a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51: Status 404 returned error can't find the container with id a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51 Nov 28 17:54:43 crc kubenswrapper[4909]: I1128 17:54:43.910864 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:54:43 crc kubenswrapper[4909]: E1128 17:54:43.922062 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:54:43 crc kubenswrapper[4909]: I1128 17:54:43.992493 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-5h9n9" event={"ID":"e5db555a-5000-4331-b1b1-6108f0a9fd17","Type":"ContainerStarted","Data":"a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51"} Nov 28 17:54:47 crc kubenswrapper[4909]: I1128 17:54:47.048955 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-api-30f7-account-create-update-5k4q7"] Nov 28 17:54:47 crc kubenswrapper[4909]: I1128 17:54:47.061848 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-30f7-account-create-update-5k4q7"] Nov 28 17:54:47 crc kubenswrapper[4909]: I1128 17:54:47.921904 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1611532c-c609-460f-9376-7767f8caebec" path="/var/lib/kubelet/pods/1611532c-c609-460f-9376-7767f8caebec/volumes" Nov 28 17:54:48 crc kubenswrapper[4909]: I1128 17:54:48.052418 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-rpjjz"] Nov 28 17:54:48 crc kubenswrapper[4909]: I1128 17:54:48.069105 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-bcc4g"] Nov 28 17:54:48 crc kubenswrapper[4909]: I1128 17:54:48.081148 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-rpjjz"] Nov 28 17:54:48 crc kubenswrapper[4909]: I1128 17:54:48.091243 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-bcc4g"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.049501 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-rznwv"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.061479 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-5h9n9" event={"ID":"e5db555a-5000-4331-b1b1-6108f0a9fd17","Type":"ContainerStarted","Data":"a83ec6ba03113cf81ed6473cad414d38d7646059004ee1d30c351dbc8477d9f1"} Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.062824 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-a66f-account-create-update-b8qxp"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.081559 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-277a-account-create-update-j5vxd"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.100898 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-rznwv"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.110264 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-a66f-account-create-update-b8qxp"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.118211 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-277a-account-create-update-j5vxd"] Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.125888 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-5h9n9" podStartSLOduration=3.221321853 podStartE2EDuration="8.125869573s" podCreationTimestamp="2025-11-28 17:54:41 +0000 UTC" firstStartedPulling="2025-11-28 17:54:43.022732487 +0000 UTC m=+6265.419417011" lastFinishedPulling="2025-11-28 17:54:47.927280207 +0000 UTC m=+6270.323964731" observedRunningTime="2025-11-28 17:54:49.084491293 +0000 UTC m=+6271.481175817" watchObservedRunningTime="2025-11-28 17:54:49.125869573 +0000 UTC m=+6271.522554097" Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.914635 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00679f33-a115-4278-b8dd-8515ab78adce" path="/var/lib/kubelet/pods/00679f33-a115-4278-b8dd-8515ab78adce/volumes" Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.916165 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="202367de-7bd5-4d73-8d87-536428b3638b" 
path="/var/lib/kubelet/pods/202367de-7bd5-4d73-8d87-536428b3638b/volumes" Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.917303 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22081f58-7375-4695-ae44-0d3523be341a" path="/var/lib/kubelet/pods/22081f58-7375-4695-ae44-0d3523be341a/volumes" Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.918149 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f469cb4-de41-446a-b59c-6c7db8411f55" path="/var/lib/kubelet/pods/6f469cb4-de41-446a-b59c-6c7db8411f55/volumes" Nov 28 17:54:49 crc kubenswrapper[4909]: I1128 17:54:49.919777 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a99f1c9e-ad94-49bc-a382-f0615533a5ac" path="/var/lib/kubelet/pods/a99f1c9e-ad94-49bc-a382-f0615533a5ac/volumes" Nov 28 17:54:51 crc kubenswrapper[4909]: I1128 17:54:51.089407 4909 generic.go:334] "Generic (PLEG): container finished" podID="e5db555a-5000-4331-b1b1-6108f0a9fd17" containerID="a83ec6ba03113cf81ed6473cad414d38d7646059004ee1d30c351dbc8477d9f1" exitCode=0 Nov 28 17:54:51 crc kubenswrapper[4909]: I1128 17:54:51.089499 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-5h9n9" event={"ID":"e5db555a-5000-4331-b1b1-6108f0a9fd17","Type":"ContainerDied","Data":"a83ec6ba03113cf81ed6473cad414d38d7646059004ee1d30c351dbc8477d9f1"} Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.623843 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.659401 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle\") pod \"e5db555a-5000-4331-b1b1-6108f0a9fd17\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.659600 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsb7k\" (UniqueName: \"kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k\") pod \"e5db555a-5000-4331-b1b1-6108f0a9fd17\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.659646 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data\") pod \"e5db555a-5000-4331-b1b1-6108f0a9fd17\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.659730 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data\") pod \"e5db555a-5000-4331-b1b1-6108f0a9fd17\" (UID: \"e5db555a-5000-4331-b1b1-6108f0a9fd17\") " Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.665827 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "e5db555a-5000-4331-b1b1-6108f0a9fd17" (UID: "e5db555a-5000-4331-b1b1-6108f0a9fd17"). InnerVolumeSpecName "job-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.666026 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k" (OuterVolumeSpecName: "kube-api-access-tsb7k") pod "e5db555a-5000-4331-b1b1-6108f0a9fd17" (UID: "e5db555a-5000-4331-b1b1-6108f0a9fd17"). InnerVolumeSpecName "kube-api-access-tsb7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.669857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data" (OuterVolumeSpecName: "config-data") pod "e5db555a-5000-4331-b1b1-6108f0a9fd17" (UID: "e5db555a-5000-4331-b1b1-6108f0a9fd17"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.691105 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5db555a-5000-4331-b1b1-6108f0a9fd17" (UID: "e5db555a-5000-4331-b1b1-6108f0a9fd17"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.761020 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsb7k\" (UniqueName: \"kubernetes.io/projected/e5db555a-5000-4331-b1b1-6108f0a9fd17-kube-api-access-tsb7k\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.761493 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.761505 4909 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:52 crc kubenswrapper[4909]: I1128 17:54:52.761513 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5db555a-5000-4331-b1b1-6108f0a9fd17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.118727 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-5h9n9" event={"ID":"e5db555a-5000-4331-b1b1-6108f0a9fd17","Type":"ContainerDied","Data":"a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51"} Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.118788 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a30fe309c048e261aeb5a15cd9d9bff85b86e0939aca846d99dd0f2e3916bf51" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.118876 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-5h9n9" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.465182 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:54:53 crc kubenswrapper[4909]: E1128 17:54:53.465634 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5db555a-5000-4331-b1b1-6108f0a9fd17" containerName="manila-db-sync" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.465649 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5db555a-5000-4331-b1b1-6108f0a9fd17" containerName="manila-db-sync" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.466413 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5db555a-5000-4331-b1b1-6108f0a9fd17" containerName="manila-db-sync" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.467603 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.478107 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.478242 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-nhlmz" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.478575 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.478778 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.500457 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.525439 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.527096 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.545487 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.568835 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580110 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2r25\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-kube-api-access-t2r25\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580230 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580297 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580330 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d7fb806-75ea-4671-a658-8871d26a148c-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580355 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580386 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zrwq\" (UniqueName: \"kubernetes.io/projected/9d7fb806-75ea-4671-a658-8871d26a148c-kube-api-access-2zrwq\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580407 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580434 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580478 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0"
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580498 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-scripts\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0"
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580541 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-scripts\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0"
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580558 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0"
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.580776 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-ceph\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0"
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.626192 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"]
Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.628196 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f"
Need to start a new one" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.680510 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"] Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.684897 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.684991 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685043 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685082 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685135 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685162 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d7fb806-75ea-4671-a658-8871d26a148c-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685194 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685239 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zrwq\" (UniqueName: \"kubernetes.io/projected/9d7fb806-75ea-4671-a658-8871d26a148c-kube-api-access-2zrwq\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685273 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685315 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685357 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685389 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685415 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-scripts\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685459 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-scripts\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685513 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685554 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s485\" (UniqueName: \"kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-ceph\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " 
pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.685637 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2r25\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-kube-api-access-t2r25\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.686048 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.687138 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a875053-cc34-499d-8aef-c515a3e3b399-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.687188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9d7fb806-75ea-4671-a658-8871d26a148c-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.697056 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.700525 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.708576 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.728550 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.749187 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2r25\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-kube-api-access-t2r25\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.749946 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-scripts\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.750424 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.750529 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a875053-cc34-499d-8aef-c515a3e3b399-config-data\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.750812 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d7fb806-75ea-4671-a658-8871d26a148c-scripts\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.751047 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a875053-cc34-499d-8aef-c515a3e3b399-ceph\") pod \"manila-share-share1-0\" (UID: \"0a875053-cc34-499d-8aef-c515a3e3b399\") " pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.756305 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zrwq\" (UniqueName: \"kubernetes.io/projected/9d7fb806-75ea-4671-a658-8871d26a148c-kube-api-access-2zrwq\") pod \"manila-scheduler-0\" (UID: \"9d7fb806-75ea-4671-a658-8871d26a148c\") " pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.793865 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.793986 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.794025 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s485\" (UniqueName: \"kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.794103 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc 
kubenswrapper[4909]: I1128 17:54:53.794152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.795133 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.795566 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.796012 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.796111 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.798153 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.845701 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s485\" (UniqueName: \"kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485\") pod \"dnsmasq-dns-55c467f6c7-5st2f\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") " pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.867366 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.891469 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.893387 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.899561 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.963273 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:53 crc kubenswrapper[4909]: I1128 17:54:53.983946 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.005584 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data-custom\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.005650 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfm5d\" (UniqueName: \"kubernetes.io/projected/21a44c88-dfc5-4832-8db4-b16f31dc9625-kube-api-access-jfm5d\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.005712 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.005966 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21a44c88-dfc5-4832-8db4-b16f31dc9625-etc-machine-id\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.006011 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-scripts\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.006026 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21a44c88-dfc5-4832-8db4-b16f31dc9625-logs\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.009959 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112453 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112748 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data-custom\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" 
Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112774 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfm5d\" (UniqueName: \"kubernetes.io/projected/21a44c88-dfc5-4832-8db4-b16f31dc9625-kube-api-access-jfm5d\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112795 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112926 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21a44c88-dfc5-4832-8db4-b16f31dc9625-etc-machine-id\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112956 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21a44c88-dfc5-4832-8db4-b16f31dc9625-logs\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.112971 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-scripts\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.113424 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21a44c88-dfc5-4832-8db4-b16f31dc9625-etc-machine-id\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.113705 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21a44c88-dfc5-4832-8db4-b16f31dc9625-logs\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.117282 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data-custom\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.117787 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-scripts\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.118269 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.118956 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21a44c88-dfc5-4832-8db4-b16f31dc9625-config-data\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.133769 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfm5d\" (UniqueName: \"kubernetes.io/projected/21a44c88-dfc5-4832-8db4-b16f31dc9625-kube-api-access-jfm5d\") pod \"manila-api-0\" (UID: \"21a44c88-dfc5-4832-8db4-b16f31dc9625\") " pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.301715 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.536098 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.658754 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"] Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.748581 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:54:54 crc kubenswrapper[4909]: W1128 17:54:54.773544 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a875053_cc34_499d_8aef_c515a3e3b399.slice/crio-53d80bfce140b86ad19ea9f000e859db0b2d6fd65875c9a23490961a0b104fd3 WatchSource:0}: Error finding container 53d80bfce140b86ad19ea9f000e859db0b2d6fd65875c9a23490961a0b104fd3: Status 404 returned error can't find the container with id 53d80bfce140b86ad19ea9f000e859db0b2d6fd65875c9a23490961a0b104fd3 Nov 28 17:54:54 crc kubenswrapper[4909]: I1128 17:54:54.901712 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:54:54 crc kubenswrapper[4909]: E1128 17:54:54.902133 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:54:55 crc kubenswrapper[4909]: I1128 17:54:55.154305 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 28 17:54:55 crc kubenswrapper[4909]: W1128 17:54:55.165621 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21a44c88_dfc5_4832_8db4_b16f31dc9625.slice/crio-58aedce72cca731aee1f0b6fec606712b86ad4948ab9ac861235bc3b03783043 WatchSource:0}: Error finding container 58aedce72cca731aee1f0b6fec606712b86ad4948ab9ac861235bc3b03783043: Status 404 returned error can't find the container with id 58aedce72cca731aee1f0b6fec606712b86ad4948ab9ac861235bc3b03783043 Nov 28 17:54:55 crc kubenswrapper[4909]: I1128 17:54:55.172181 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a875053-cc34-499d-8aef-c515a3e3b399","Type":"ContainerStarted","Data":"53d80bfce140b86ad19ea9f000e859db0b2d6fd65875c9a23490961a0b104fd3"} Nov 28 17:54:55 crc kubenswrapper[4909]: I1128 17:54:55.178793 
4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerStarted","Data":"d6632a747ce0edf41d7188b9a0e641e18e2c2b057bb851710068c9bbafa2bad2"} Nov 28 17:54:55 crc kubenswrapper[4909]: I1128 17:54:55.178847 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerStarted","Data":"c22f3912b0b3d89332afe836dddd952372853eda43199eaf89708b90ac68270e"} Nov 28 17:54:55 crc kubenswrapper[4909]: I1128 17:54:55.181809 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9d7fb806-75ea-4671-a658-8871d26a148c","Type":"ContainerStarted","Data":"9eb4d00cfa28e789dadb9232ceb87df9de5f4cb7afd5d5a2050f16f12d75ee9f"} Nov 28 17:54:56 crc kubenswrapper[4909]: I1128 17:54:56.202572 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9d7fb806-75ea-4671-a658-8871d26a148c","Type":"ContainerStarted","Data":"09479eed8d9e3baca314250fecb7858d068926e1c43b206e8949f3163a76ffff"} Nov 28 17:54:56 crc kubenswrapper[4909]: I1128 17:54:56.205258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"21a44c88-dfc5-4832-8db4-b16f31dc9625","Type":"ContainerStarted","Data":"2a9eb1299d2c439dc6c48859148ab6888019b4d7759c5f5b3486b5a947a7e76d"} Nov 28 17:54:56 crc kubenswrapper[4909]: I1128 17:54:56.205307 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"21a44c88-dfc5-4832-8db4-b16f31dc9625","Type":"ContainerStarted","Data":"58aedce72cca731aee1f0b6fec606712b86ad4948ab9ac861235bc3b03783043"} Nov 28 17:54:56 crc kubenswrapper[4909]: I1128 17:54:56.208268 4909 generic.go:334] "Generic (PLEG): container finished" podID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerID="d6632a747ce0edf41d7188b9a0e641e18e2c2b057bb851710068c9bbafa2bad2" exitCode=0 Nov 28 17:54:56 crc kubenswrapper[4909]: I1128 17:54:56.208308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerDied","Data":"d6632a747ce0edf41d7188b9a0e641e18e2c2b057bb851710068c9bbafa2bad2"} Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.039732 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4gwd6"] Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.062894 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4gwd6"] Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.223004 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerStarted","Data":"d72a24b9dc7d8271734927560fc61a0ded4ec144b7a095cc0d2a6ac96b5ced5d"} Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.223302 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.224524 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9d7fb806-75ea-4671-a658-8871d26a148c","Type":"ContainerStarted","Data":"af4b671f88ae031bd5562441d7d4e71eebfb56ebef16bf5402e613db2cd5a5f6"} Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.227114 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"21a44c88-dfc5-4832-8db4-b16f31dc9625","Type":"ContainerStarted","Data":"f73362b299b419701c278bd7b905a1b0638172f2a8f2882ab940862b54420f0c"} Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.227256 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.236746 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.248142 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" podStartSLOduration=4.248125039 podStartE2EDuration="4.248125039s" podCreationTimestamp="2025-11-28 17:54:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:54:57.241344488 +0000 UTC m=+6279.638029012" watchObservedRunningTime="2025-11-28 17:54:57.248125039 +0000 UTC m=+6279.644809563" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.290916 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.29090164 podStartE2EDuration="4.29090164s" podCreationTimestamp="2025-11-28 17:54:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:54:57.289593955 +0000 UTC m=+6279.686278479" watchObservedRunningTime="2025-11-28 17:54:57.29090164 +0000 UTC m=+6279.687586164" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.328942 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.49416649 podStartE2EDuration="4.328922144s" podCreationTimestamp="2025-11-28 17:54:53 +0000 UTC" firstStartedPulling="2025-11-28 17:54:54.544444889 +0000 UTC m=+6276.941129413" lastFinishedPulling="2025-11-28 17:54:55.379200543 +0000 UTC m=+6277.775885067" observedRunningTime="2025-11-28 17:54:57.322562674 +0000 UTC m=+6279.719247198" watchObservedRunningTime="2025-11-28 17:54:57.328922144 +0000 UTC m=+6279.725606668" Nov 28 17:54:57 crc kubenswrapper[4909]: I1128 17:54:57.929552 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c682b238-7341-4d5f-bf11-45f1dfb386ce" path="/var/lib/kubelet/pods/c682b238-7341-4d5f-bf11-45f1dfb386ce/volumes" Nov 28 17:55:02 crc kubenswrapper[4909]: I1128 17:55:02.281438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a875053-cc34-499d-8aef-c515a3e3b399","Type":"ContainerStarted","Data":"1a13ae9e3be0b1cebb198b413a28b2744251435b5e8cf8ec1611767b194ba503"} Nov 28 17:55:02 crc kubenswrapper[4909]: I1128 17:55:02.283167 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a875053-cc34-499d-8aef-c515a3e3b399","Type":"ContainerStarted","Data":"d677639715ac13f65a16b713dd8ed982e55f4a4fd8912e604a8b2dcf68b73b29"} Nov 28 17:55:02 crc kubenswrapper[4909]: I1128 17:55:02.329289 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.141678399 podStartE2EDuration="9.329270899s" podCreationTimestamp="2025-11-28 17:54:53 +0000 UTC" firstStartedPulling="2025-11-28 17:54:54.776244201 +0000 UTC m=+6277.172928715" 
lastFinishedPulling="2025-11-28 17:55:00.963836691 +0000 UTC m=+6283.360521215" observedRunningTime="2025-11-28 17:55:02.320960167 +0000 UTC m=+6284.717644701" watchObservedRunningTime="2025-11-28 17:55:02.329270899 +0000 UTC m=+6284.725955423" Nov 28 17:55:03 crc kubenswrapper[4909]: I1128 17:55:03.798401 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 28 17:55:03 crc kubenswrapper[4909]: I1128 17:55:03.869045 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 28 17:55:03 crc kubenswrapper[4909]: I1128 17:55:03.964801 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.035932 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"] Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.036239 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="dnsmasq-dns" containerID="cri-o://f0b18132ed8a6bffb6ef9d4d938f8473a5f0e34c997df09948a44ba46f7c58c1" gracePeriod=10 Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.310729 4909 generic.go:334] "Generic (PLEG): container finished" podID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerID="f0b18132ed8a6bffb6ef9d4d938f8473a5f0e34c997df09948a44ba46f7c58c1" exitCode=0 Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.310867 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" event={"ID":"45872bd0-84d2-43ff-92dc-86de32a67a64","Type":"ContainerDied","Data":"f0b18132ed8a6bffb6ef9d4d938f8473a5f0e34c997df09948a44ba46f7c58c1"} Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.559910 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.680950 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb\") pod \"45872bd0-84d2-43ff-92dc-86de32a67a64\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.681013 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc\") pod \"45872bd0-84d2-43ff-92dc-86de32a67a64\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.681219 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhtrq\" (UniqueName: \"kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq\") pod \"45872bd0-84d2-43ff-92dc-86de32a67a64\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.681264 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config\") pod \"45872bd0-84d2-43ff-92dc-86de32a67a64\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.681319 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb\") pod \"45872bd0-84d2-43ff-92dc-86de32a67a64\" (UID: \"45872bd0-84d2-43ff-92dc-86de32a67a64\") " Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.687162 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq" (OuterVolumeSpecName: "kube-api-access-zhtrq") pod "45872bd0-84d2-43ff-92dc-86de32a67a64" (UID: "45872bd0-84d2-43ff-92dc-86de32a67a64"). InnerVolumeSpecName "kube-api-access-zhtrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.730758 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45872bd0-84d2-43ff-92dc-86de32a67a64" (UID: "45872bd0-84d2-43ff-92dc-86de32a67a64"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.742143 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "45872bd0-84d2-43ff-92dc-86de32a67a64" (UID: "45872bd0-84d2-43ff-92dc-86de32a67a64"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.754231 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "45872bd0-84d2-43ff-92dc-86de32a67a64" (UID: "45872bd0-84d2-43ff-92dc-86de32a67a64"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.761360 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config" (OuterVolumeSpecName: "config") pod "45872bd0-84d2-43ff-92dc-86de32a67a64" (UID: "45872bd0-84d2-43ff-92dc-86de32a67a64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.783511 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhtrq\" (UniqueName: \"kubernetes.io/projected/45872bd0-84d2-43ff-92dc-86de32a67a64-kube-api-access-zhtrq\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.783545 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.783556 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.783563 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:04 crc kubenswrapper[4909]: I1128 17:55:04.783573 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45872bd0-84d2-43ff-92dc-86de32a67a64-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.325142 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" event={"ID":"45872bd0-84d2-43ff-92dc-86de32a67a64","Type":"ContainerDied","Data":"2ac729cea6c6b25ff586da491d105ad3331ac009e06d1784b930a434c19eab95"} Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.325193 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bcf776997-bfqmp" Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.325505 4909 scope.go:117] "RemoveContainer" containerID="f0b18132ed8a6bffb6ef9d4d938f8473a5f0e34c997df09948a44ba46f7c58c1" Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.363358 4909 scope.go:117] "RemoveContainer" containerID="358757d9a7a0cfd070b1faaf6d1987193c8f32e8b14620eb01c08eb5dcb3f680" Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.363504 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"] Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.372840 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bcf776997-bfqmp"] Nov 28 17:55:05 crc kubenswrapper[4909]: I1128 17:55:05.920086 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" path="/var/lib/kubelet/pods/45872bd0-84d2-43ff-92dc-86de32a67a64/volumes" Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.515401 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.515896 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-central-agent" containerID="cri-o://e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6" gracePeriod=30 Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.516020 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="proxy-httpd" containerID="cri-o://e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3" gracePeriod=30 Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.516062 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="sg-core" containerID="cri-o://2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b" gracePeriod=30 Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.516093 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-notification-agent" containerID="cri-o://bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab" gracePeriod=30 Nov 28 17:55:06 crc kubenswrapper[4909]: I1128 17:55:06.901506 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:55:06 crc kubenswrapper[4909]: E1128 17:55:06.901812 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355128 4909 generic.go:334] "Generic (PLEG): container finished" podID="a7894653-d935-4fd0-852d-f36425839b1f" containerID="e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3" exitCode=0 Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355393 4909 
generic.go:334] "Generic (PLEG): container finished" podID="a7894653-d935-4fd0-852d-f36425839b1f" containerID="2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b" exitCode=2 Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355404 4909 generic.go:334] "Generic (PLEG): container finished" podID="a7894653-d935-4fd0-852d-f36425839b1f" containerID="e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6" exitCode=0 Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355178 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerDied","Data":"e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3"} Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355437 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerDied","Data":"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b"} Nov 28 17:55:07 crc kubenswrapper[4909]: I1128 17:55:07.355448 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerDied","Data":"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6"} Nov 28 17:55:11 crc kubenswrapper[4909]: I1128 17:55:11.037892 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p9m6s"] Nov 28 17:55:11 crc kubenswrapper[4909]: I1128 17:55:11.071279 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p9m6s"] Nov 28 17:55:11 crc kubenswrapper[4909]: I1128 17:55:11.916546 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e858b1-afe6-4fde-a513-635c137a0275" path="/var/lib/kubelet/pods/c2e858b1-afe6-4fde-a513-635c137a0275/volumes" Nov 28 17:55:13 crc kubenswrapper[4909]: I1128 17:55:13.038745 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-9d9sb"] Nov 28 17:55:13 crc kubenswrapper[4909]: I1128 17:55:13.049207 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-9d9sb"] Nov 28 17:55:13 crc kubenswrapper[4909]: I1128 17:55:13.917011 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a78f44-565c-4a51-821b-0c83055e4fd3" path="/var/lib/kubelet/pods/f5a78f44-565c-4a51-821b-0c83055e4fd3/volumes" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.006522 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088000 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088053 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088179 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088265 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2lbj\" (UniqueName: \"kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088305 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-sg-core-conf-yaml\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088354 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.088379 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data\") pod \"a7894653-d935-4fd0-852d-f36425839b1f\" (UID: \"a7894653-d935-4fd0-852d-f36425839b1f\") " Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.090065 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.090386 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.094595 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj" (OuterVolumeSpecName: "kube-api-access-t2lbj") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "kube-api-access-t2lbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.098524 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts" (OuterVolumeSpecName: "scripts") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.135331 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.191192 4909 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.191405 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2lbj\" (UniqueName: \"kubernetes.io/projected/a7894653-d935-4fd0-852d-f36425839b1f-kube-api-access-t2lbj\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.191535 4909 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.191617 4909 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7894653-d935-4fd0-852d-f36425839b1f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.191712 4909 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.194938 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.244932 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data" (OuterVolumeSpecName: "config-data") pod "a7894653-d935-4fd0-852d-f36425839b1f" (UID: "a7894653-d935-4fd0-852d-f36425839b1f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.294709 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.294796 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7894653-d935-4fd0-852d-f36425839b1f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.439855 4909 generic.go:334] "Generic (PLEG): container finished" podID="a7894653-d935-4fd0-852d-f36425839b1f" containerID="bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab" exitCode=0 Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.440175 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.440063 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerDied","Data":"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab"} Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.440465 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7894653-d935-4fd0-852d-f36425839b1f","Type":"ContainerDied","Data":"b4c5c966e8c905d06e80db85935aedd3c2bb64aa1d3d7007dcf75cd88f0ba33e"} Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.444958 4909 scope.go:117] "RemoveContainer" containerID="e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.479516 4909 scope.go:117] "RemoveContainer" containerID="2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.503398 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.519135 4909 scope.go:117] "RemoveContainer" containerID="bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.553723 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.568720 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569190 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="init" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569208 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="init" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569231 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-central-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569238 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-central-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569252 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7894653-d935-4fd0-852d-f36425839b1f" 
containerName="sg-core" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569259 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="sg-core" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569272 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="proxy-httpd" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569278 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="proxy-httpd" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569296 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-notification-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569303 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-notification-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.569324 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="dnsmasq-dns" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569329 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="dnsmasq-dns" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569512 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-central-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569534 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="sg-core" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569545 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="45872bd0-84d2-43ff-92dc-86de32a67a64" containerName="dnsmasq-dns" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569556 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="ceilometer-notification-agent" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.569570 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7894653-d935-4fd0-852d-f36425839b1f" containerName="proxy-httpd" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.571519 4909 util.go:30] "No sandbox for pod can be found. 
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.573878 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.575330 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.578144 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.581585 4909 scope.go:117] "RemoveContainer" containerID="e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.607896 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jcq8\" (UniqueName: \"kubernetes.io/projected/bda35a95-ef50-478b-9dd3-7ce3743fda57-kube-api-access-9jcq8\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.607942 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-log-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.608033 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.608238 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-config-data\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.608353 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.608404 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-run-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.608468 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-scripts\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.637846 4909 scope.go:117] "RemoveContainer" containerID="e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3"
Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.638264 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3\": container with ID starting with e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3 not found: ID does not exist" containerID="e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.638296 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3"} err="failed to get container status \"e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3\": rpc error: code = NotFound desc = could not find container \"e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3\": container with ID starting with e3e19d316361d68e4f892b1ff26bdeccec1f5445537d34af6e7b3032bccee0f3 not found: ID does not exist"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.638317 4909 scope.go:117] "RemoveContainer" containerID="2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b"
Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.638528 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b\": container with ID starting with 2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b not found: ID does not exist" containerID="2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.638584 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b"} err="failed to get container status \"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b\": rpc error: code = NotFound desc = could not find container \"2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b\": container with ID starting with 2df6656b43236561a8b34e107fb25acd16fe397b5523795f95e6f98b8ef0185b not found: ID does not exist"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.638598 4909 scope.go:117] "RemoveContainer" containerID="bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab"
Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.639712 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab\": container with ID starting with bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab not found: ID does not exist" containerID="bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab"
Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.639743 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab"} err="failed to get container status \"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab\": rpc error: code = NotFound desc = could not find container \"bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab\": container with ID starting with bc1f10153e15882993e99071934a9562df9de18f93dc286a149db1b0d87d3bab not found: ID does not exist"
scope.go:117] "RemoveContainer" containerID="e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6" Nov 28 17:55:14 crc kubenswrapper[4909]: E1128 17:55:14.640003 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6\": container with ID starting with e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6 not found: ID does not exist" containerID="e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.640031 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6"} err="failed to get container status \"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6\": rpc error: code = NotFound desc = could not find container \"e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6\": container with ID starting with e8e1bf3840e9406783f658d6e1a58d796903552a483de449df05759e2947dcc6 not found: ID does not exist" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710531 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-log-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710580 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jcq8\" (UniqueName: \"kubernetes.io/projected/bda35a95-ef50-478b-9dd3-7ce3743fda57-kube-api-access-9jcq8\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710649 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710712 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-config-data\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710746 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710764 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-run-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.710789 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-scripts\") pod 
\"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.711049 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-log-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.711870 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bda35a95-ef50-478b-9dd3-7ce3743fda57-run-httpd\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.716887 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.716909 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.719262 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-config-data\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.720188 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda35a95-ef50-478b-9dd3-7ce3743fda57-scripts\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.729547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jcq8\" (UniqueName: \"kubernetes.io/projected/bda35a95-ef50-478b-9dd3-7ce3743fda57-kube-api-access-9jcq8\") pod \"ceilometer-0\" (UID: \"bda35a95-ef50-478b-9dd3-7ce3743fda57\") " pod="openstack/ceilometer-0" Nov 28 17:55:14 crc kubenswrapper[4909]: I1128 17:55:14.934783 4909 util.go:30] "No sandbox for pod can be found. 
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.338172 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0"
Nov 28 17:55:15 crc kubenswrapper[4909]: W1128 17:55:15.409319 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbda35a95_ef50_478b_9dd3_7ce3743fda57.slice/crio-be068586f6ad6fcdde1bee8131fb634390d68adddafc46df559fc4e51e71a948 WatchSource:0}: Error finding container be068586f6ad6fcdde1bee8131fb634390d68adddafc46df559fc4e51e71a948: Status 404 returned error can't find the container with id be068586f6ad6fcdde1bee8131fb634390d68adddafc46df559fc4e51e71a948
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.411229 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.452043 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bda35a95-ef50-478b-9dd3-7ce3743fda57","Type":"ContainerStarted","Data":"be068586f6ad6fcdde1bee8131fb634390d68adddafc46df559fc4e51e71a948"}
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.649792 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0"
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.662260 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0"
Nov 28 17:55:15 crc kubenswrapper[4909]: I1128 17:55:15.921573 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7894653-d935-4fd0-852d-f36425839b1f" path="/var/lib/kubelet/pods/a7894653-d935-4fd0-852d-f36425839b1f/volumes"
Nov 28 17:55:16 crc kubenswrapper[4909]: I1128 17:55:16.470826 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bda35a95-ef50-478b-9dd3-7ce3743fda57","Type":"ContainerStarted","Data":"01288393bb94f28602482d8db9c63ab04b52fd9ac8d3957c8a385010959d1403"}
Nov 28 17:55:17 crc kubenswrapper[4909]: I1128 17:55:17.483591 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bda35a95-ef50-478b-9dd3-7ce3743fda57","Type":"ContainerStarted","Data":"10f27d7e662340c12e6d09da06c1a192cfdc4936c757a1e04d4ee744178caa2b"}
Nov 28 17:55:18 crc kubenswrapper[4909]: I1128 17:55:18.502848 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bda35a95-ef50-478b-9dd3-7ce3743fda57","Type":"ContainerStarted","Data":"4b742c4473eb37be830b9bda0015351bb3e553b5deb9bc448c86bf0a57c3f7cd"}
Nov 28 17:55:19 crc kubenswrapper[4909]: I1128 17:55:19.526686 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bda35a95-ef50-478b-9dd3-7ce3743fda57","Type":"ContainerStarted","Data":"ff0435747ee5bad5cc9e6c1aa6fae16437357928b065b78d322bdc65a8222f41"}
Nov 28 17:55:19 crc kubenswrapper[4909]: I1128 17:55:19.527155 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 17:55:19 crc kubenswrapper[4909]: I1128 17:55:19.550493 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.966357044 podStartE2EDuration="5.550468277s" podCreationTimestamp="2025-11-28 17:55:14 +0000 UTC" firstStartedPulling="2025-11-28 17:55:15.412108722 +0000 UTC m=+6297.808793246" lastFinishedPulling="2025-11-28 17:55:18.996219955 +0000 UTC m=+6301.392904479" observedRunningTime="2025-11-28 17:55:19.546376678 +0000 UTC m=+6301.943061212" watchObservedRunningTime="2025-11-28 17:55:19.550468277 +0000 UTC m=+6301.947152801"
Nov 28 17:55:21 crc kubenswrapper[4909]: I1128 17:55:21.902749 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f"
Nov 28 17:55:22 crc kubenswrapper[4909]: I1128 17:55:22.573729 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc"}
Nov 28 17:55:26 crc kubenswrapper[4909]: I1128 17:55:26.059534 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-zhtp2"]
Nov 28 17:55:26 crc kubenswrapper[4909]: I1128 17:55:26.071171 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-zhtp2"]
Nov 28 17:55:27 crc kubenswrapper[4909]: I1128 17:55:27.916145 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32b73e05-07dd-49d0-8b6d-6e2a7258a66b" path="/var/lib/kubelet/pods/32b73e05-07dd-49d0-8b6d-6e2a7258a66b/volumes"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.135916 4909 scope.go:117] "RemoveContainer" containerID="ee678844165960f867f4193cc477f1d7f98e4bc318cf99967e273c736e61d5a4"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.203830 4909 scope.go:117] "RemoveContainer" containerID="33bed1a3b657e444a60eb507f34cc6e84cda17feacca7ab808bf2aed15bd5975"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.247561 4909 scope.go:117] "RemoveContainer" containerID="2e05a935d700a45c8148b75d65fbc2f4dc3273d480d7d5851e262f75c88325b6"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.292935 4909 scope.go:117] "RemoveContainer" containerID="fee70e379f18321de810331a3c37dc1dc62b18bca6b023997b76552c885b04e4"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.337642 4909 scope.go:117] "RemoveContainer" containerID="901214c22a53b1a5aba0637cb474a8013e7cdee1d881a1b9a5168ea372206b7c"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.409700 4909 scope.go:117] "RemoveContainer" containerID="b0b79c5acff85a28fbd7545fd478661c8c5e8f63ab6124f60e4a425495da8135"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.454874 4909 scope.go:117] "RemoveContainer" containerID="f59a638d283155c2808c40b351bb1fb6ed0e0fcaa20b35283c12cff6d9201816"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.493111 4909 scope.go:117] "RemoveContainer" containerID="7719531f915a8228b59dbb4f35d129d72e5414119d5f730c78cee695f12e2725"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.528913 4909 scope.go:117] "RemoveContainer" containerID="c216bd32a45143ab494c44c5d85485b874cbd98b2be1b50013581eb7f65cc07c"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.550976 4909 scope.go:117] "RemoveContainer" containerID="60240d1d35142df0c28caf64c807c1eecd8d0da966af14a7e39e9b1e61641f40"
Nov 28 17:55:35 crc kubenswrapper[4909]: I1128 17:55:35.574750 4909 scope.go:117] "RemoveContainer" containerID="0c124de33435ce2fe12221ae8431f18613e29973f9159df5f61fbeb56b3e2ff8"
Nov 28 17:55:44 crc kubenswrapper[4909]: I1128 17:55:44.944161 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.018413 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"]
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.021285 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.024117 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.034209 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"]
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.111743 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.111807 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.111984 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.112053 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97sqh\" (UniqueName: \"kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.112084 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.112337 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.214889 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215016 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215073 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97sqh\" (UniqueName: \"kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215099 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215172 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215274 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.215997 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.216332 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.216359 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.216456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.216495 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.234277 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97sqh\" (UniqueName: \"kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh\") pod \"dnsmasq-dns-86b59fc6c5-jvnfr\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.351980 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:06 crc kubenswrapper[4909]: I1128 17:56:06.896460 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"]
Nov 28 17:56:06 crc kubenswrapper[4909]: W1128 17:56:06.910967 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod451be04f_ff1a_4075_b229_3a662fcb67bd.slice/crio-b525816d9b6067fc02a3790e3c6a8c2a41a8d101ec6c3c7ef5d9a4e595d94f97 WatchSource:0}: Error finding container b525816d9b6067fc02a3790e3c6a8c2a41a8d101ec6c3c7ef5d9a4e595d94f97: Status 404 returned error can't find the container with id b525816d9b6067fc02a3790e3c6a8c2a41a8d101ec6c3c7ef5d9a4e595d94f97
Nov 28 17:56:07 crc kubenswrapper[4909]: I1128 17:56:07.139382 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" event={"ID":"451be04f-ff1a-4075-b229-3a662fcb67bd","Type":"ContainerStarted","Data":"b525816d9b6067fc02a3790e3c6a8c2a41a8d101ec6c3c7ef5d9a4e595d94f97"}
Nov 28 17:56:08 crc kubenswrapper[4909]: I1128 17:56:08.174426 4909 generic.go:334] "Generic (PLEG): container finished" podID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerID="10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea" exitCode=0
Nov 28 17:56:08 crc kubenswrapper[4909]: I1128 17:56:08.174795 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" event={"ID":"451be04f-ff1a-4075-b229-3a662fcb67bd","Type":"ContainerDied","Data":"10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea"}
Nov 28 17:56:09 crc kubenswrapper[4909]: I1128 17:56:09.192857 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" event={"ID":"451be04f-ff1a-4075-b229-3a662fcb67bd","Type":"ContainerStarted","Data":"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4"}
Nov 28 17:56:09 crc kubenswrapper[4909]: I1128 17:56:09.195041 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr"
Nov 28 17:56:09 crc kubenswrapper[4909]: I1128 17:56:09.228020 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" podStartSLOduration=4.227996791 podStartE2EDuration="4.227996791s" podCreationTimestamp="2025-11-28 17:56:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:56:09.218317433 +0000 UTC m=+6351.615001957" watchObservedRunningTime="2025-11-28 17:56:09.227996791 +0000 UTC m=+6351.624681305"
Nov 28 17:56:09 crc kubenswrapper[4909]: I1128 17:56:09.992027 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"]
Nov 28 17:56:09 crc kubenswrapper[4909]: I1128 17:56:09.994991 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.006813 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"]
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.009574 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.009718 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr2r9\" (UniqueName: \"kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.009755 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.050315 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-vlvbk"]
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.063135 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"]
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.073129 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-vlvbk"]
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.081851 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-4ece-account-create-update-8gc9p"]
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.111795 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.111908 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vr2r9\" (UniqueName: \"kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.111941 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.112350 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.112386 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.132898 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr2r9\" (UniqueName: \"kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9\") pod \"redhat-marketplace-gkkkg\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.321089 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkkkg"
Nov 28 17:56:10 crc kubenswrapper[4909]: I1128 17:56:10.879929 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"]
Nov 28 17:56:11 crc kubenswrapper[4909]: I1128 17:56:11.225610 4909 generic.go:334] "Generic (PLEG): container finished" podID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerID="b1e9a6354c90cc9f0f5aafae4c1795dfed3fd7fbf0a6ab0670521184f3b9b918" exitCode=0
Nov 28 17:56:11 crc kubenswrapper[4909]: I1128 17:56:11.225714 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerDied","Data":"b1e9a6354c90cc9f0f5aafae4c1795dfed3fd7fbf0a6ab0670521184f3b9b918"}
Nov 28 17:56:11 crc kubenswrapper[4909]: I1128 17:56:11.226124 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerStarted","Data":"23262052cd9543ba6fa051ae4effd6c2b402ac9b81e048761755c6527b5bff03"}
Nov 28 17:56:11 crc kubenswrapper[4909]: I1128 17:56:11.939253 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c13ce55f-0d0a-4e2f-adcc-98b7829703a4" path="/var/lib/kubelet/pods/c13ce55f-0d0a-4e2f-adcc-98b7829703a4/volumes"
Nov 28 17:56:11 crc kubenswrapper[4909]: I1128 17:56:11.939856 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed18cc94-ace5-4bf8-a055-dd864ec5e5c8" path="/var/lib/kubelet/pods/ed18cc94-ace5-4bf8-a055-dd864ec5e5c8/volumes"
Nov 28 17:56:12 crc kubenswrapper[4909]: I1128 17:56:12.254914 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerStarted","Data":"ad0b71c5b3c3803e1d6a32aceddf0d0a6df20577d7d3c8339ef2d3a693cde95e"}
Nov 28 17:56:13 crc kubenswrapper[4909]: I1128 17:56:13.265030 4909 generic.go:334] "Generic (PLEG): container finished" podID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerID="ad0b71c5b3c3803e1d6a32aceddf0d0a6df20577d7d3c8339ef2d3a693cde95e" exitCode=0
Nov 28 17:56:13 crc kubenswrapper[4909]: I1128 17:56:13.265197 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerDied","Data":"ad0b71c5b3c3803e1d6a32aceddf0d0a6df20577d7d3c8339ef2d3a693cde95e"}
pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerDied","Data":"ad0b71c5b3c3803e1d6a32aceddf0d0a6df20577d7d3c8339ef2d3a693cde95e"} Nov 28 17:56:14 crc kubenswrapper[4909]: I1128 17:56:14.281827 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerStarted","Data":"99d448c89de192ea22caca87a869c12198cd34e53dd2e43efe188b369a339ecc"} Nov 28 17:56:14 crc kubenswrapper[4909]: I1128 17:56:14.316384 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gkkkg" podStartSLOduration=2.8523228830000003 podStartE2EDuration="5.316363963s" podCreationTimestamp="2025-11-28 17:56:09 +0000 UTC" firstStartedPulling="2025-11-28 17:56:11.228528346 +0000 UTC m=+6353.625212880" lastFinishedPulling="2025-11-28 17:56:13.692569396 +0000 UTC m=+6356.089253960" observedRunningTime="2025-11-28 17:56:14.306041358 +0000 UTC m=+6356.702725912" watchObservedRunningTime="2025-11-28 17:56:14.316363963 +0000 UTC m=+6356.713048487" Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.355882 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.425012 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"] Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.425304 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="dnsmasq-dns" containerID="cri-o://d72a24b9dc7d8271734927560fc61a0ded4ec144b7a095cc0d2a6ac96b5ced5d" gracePeriod=10 Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.575855 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56b94bdf77-gbr2l"] Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.577831 4909 util.go:30] "No sandbox for pod can be found. 
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.615207 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56b94bdf77-gbr2l"]
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761162 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-openstack-cell1\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761528 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-sb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761569 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blxc6\" (UniqueName: \"kubernetes.io/projected/49263899-997f-4338-8a6f-0492e45aad4f-kube-api-access-blxc6\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761595 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-config\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761618 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-dns-svc\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.761638 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-nb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863398 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-sb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863474 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blxc6\" (UniqueName: \"kubernetes.io/projected/49263899-997f-4338-8a6f-0492e45aad4f-kube-api-access-blxc6\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863499 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-config\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863520 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-dns-svc\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-nb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.863698 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-openstack-cell1\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.864750 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-openstack-cell1\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.865310 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-sb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.866092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-config\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.866571 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-dns-svc\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.867209 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49263899-997f-4338-8a6f-0492e45aad4f-ovsdbserver-nb\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.889065 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blxc6\" (UniqueName: \"kubernetes.io/projected/49263899-997f-4338-8a6f-0492e45aad4f-kube-api-access-blxc6\") pod \"dnsmasq-dns-56b94bdf77-gbr2l\" (UID: \"49263899-997f-4338-8a6f-0492e45aad4f\") " pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:16 crc kubenswrapper[4909]: I1128 17:56:16.994480 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l"
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.329192 4909 generic.go:334] "Generic (PLEG): container finished" podID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerID="d72a24b9dc7d8271734927560fc61a0ded4ec144b7a095cc0d2a6ac96b5ced5d" exitCode=0
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.329244 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerDied","Data":"d72a24b9dc7d8271734927560fc61a0ded4ec144b7a095cc0d2a6ac96b5ced5d"}
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.691051 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f"
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.782064 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s485\" (UniqueName: \"kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485\") pod \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") "
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.782690 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb\") pod \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") "
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.782852 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc\") pod \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") "
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.783049 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb\") pod \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") "
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.783135 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config\") pod \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\" (UID: \"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c\") "
Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.788878 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485" (OuterVolumeSpecName: "kube-api-access-6s485") pod "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" (UID: "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c"). InnerVolumeSpecName "kube-api-access-6s485". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.804101 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56b94bdf77-gbr2l"] Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.868283 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" (UID: "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.883438 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" (UID: "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.888433 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.888459 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s485\" (UniqueName: \"kubernetes.io/projected/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-kube-api-access-6s485\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.888470 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.894048 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" (UID: "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.897499 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config" (OuterVolumeSpecName: "config") pod "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" (UID: "06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.992423 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:17 crc kubenswrapper[4909]: I1128 17:56:17.992462 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.045541 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-6m8rl"] Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.056115 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-6m8rl"] Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.340021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" event={"ID":"06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c","Type":"ContainerDied","Data":"c22f3912b0b3d89332afe836dddd952372853eda43199eaf89708b90ac68270e"} Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.340344 4909 scope.go:117] "RemoveContainer" containerID="d72a24b9dc7d8271734927560fc61a0ded4ec144b7a095cc0d2a6ac96b5ced5d" Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.340450 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c467f6c7-5st2f" Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.344373 4909 generic.go:334] "Generic (PLEG): container finished" podID="49263899-997f-4338-8a6f-0492e45aad4f" containerID="19666d325b6ad88673ddf48a97b716ec03cd47889795846bf93e8aac7872f338" exitCode=0 Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.344426 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" event={"ID":"49263899-997f-4338-8a6f-0492e45aad4f","Type":"ContainerDied","Data":"19666d325b6ad88673ddf48a97b716ec03cd47889795846bf93e8aac7872f338"} Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.344462 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" event={"ID":"49263899-997f-4338-8a6f-0492e45aad4f","Type":"ContainerStarted","Data":"1785294d6e2e6c0dab30f67261c92a8579282a484b8d72e4d30e03ae5db68d90"} Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.376719 4909 scope.go:117] "RemoveContainer" containerID="d6632a747ce0edf41d7188b9a0e641e18e2c2b057bb851710068c9bbafa2bad2" Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.405270 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"] Nov 28 17:56:18 crc kubenswrapper[4909]: I1128 17:56:18.423380 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55c467f6c7-5st2f"] Nov 28 17:56:19 crc kubenswrapper[4909]: I1128 17:56:19.357545 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" event={"ID":"49263899-997f-4338-8a6f-0492e45aad4f","Type":"ContainerStarted","Data":"ea054c3372f63542d918e1a6b6596b6c1459e219999dc1e126d3b1493fb4b583"} Nov 28 17:56:19 crc kubenswrapper[4909]: I1128 17:56:19.358093 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" Nov 28 17:56:19 crc kubenswrapper[4909]: I1128 17:56:19.382806 4909 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" podStartSLOduration=3.38278624 podStartE2EDuration="3.38278624s" podCreationTimestamp="2025-11-28 17:56:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:56:19.381100565 +0000 UTC m=+6361.777785109" watchObservedRunningTime="2025-11-28 17:56:19.38278624 +0000 UTC m=+6361.779470764" Nov 28 17:56:19 crc kubenswrapper[4909]: I1128 17:56:19.912810 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" path="/var/lib/kubelet/pods/06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c/volumes" Nov 28 17:56:19 crc kubenswrapper[4909]: I1128 17:56:19.913493 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdcedded-3fc7-4242-a4b8-c34f0560daef" path="/var/lib/kubelet/pods/bdcedded-3fc7-4242-a4b8-c34f0560daef/volumes" Nov 28 17:56:20 crc kubenswrapper[4909]: I1128 17:56:20.322344 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:20 crc kubenswrapper[4909]: I1128 17:56:20.322705 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:20 crc kubenswrapper[4909]: I1128 17:56:20.389492 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:20 crc kubenswrapper[4909]: I1128 17:56:20.445733 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:20 crc kubenswrapper[4909]: I1128 17:56:20.633835 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"] Nov 28 17:56:22 crc kubenswrapper[4909]: I1128 17:56:22.394341 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gkkkg" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="registry-server" containerID="cri-o://99d448c89de192ea22caca87a869c12198cd34e53dd2e43efe188b369a339ecc" gracePeriod=2 Nov 28 17:56:23 crc kubenswrapper[4909]: I1128 17:56:23.828628 4909 generic.go:334] "Generic (PLEG): container finished" podID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerID="99d448c89de192ea22caca87a869c12198cd34e53dd2e43efe188b369a339ecc" exitCode=0 Nov 28 17:56:23 crc kubenswrapper[4909]: I1128 17:56:23.828966 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerDied","Data":"99d448c89de192ea22caca87a869c12198cd34e53dd2e43efe188b369a339ecc"} Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.636177 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.736078 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content\") pod \"043bef59-2e05-4a43-bb21-ca25e1ce483e\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.737056 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vr2r9\" (UniqueName: \"kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9\") pod \"043bef59-2e05-4a43-bb21-ca25e1ce483e\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.737193 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities\") pod \"043bef59-2e05-4a43-bb21-ca25e1ce483e\" (UID: \"043bef59-2e05-4a43-bb21-ca25e1ce483e\") " Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.739263 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities" (OuterVolumeSpecName: "utilities") pod "043bef59-2e05-4a43-bb21-ca25e1ce483e" (UID: "043bef59-2e05-4a43-bb21-ca25e1ce483e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.742859 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9" (OuterVolumeSpecName: "kube-api-access-vr2r9") pod "043bef59-2e05-4a43-bb21-ca25e1ce483e" (UID: "043bef59-2e05-4a43-bb21-ca25e1ce483e"). InnerVolumeSpecName "kube-api-access-vr2r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.752285 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "043bef59-2e05-4a43-bb21-ca25e1ce483e" (UID: "043bef59-2e05-4a43-bb21-ca25e1ce483e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.840850 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.840879 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vr2r9\" (UniqueName: \"kubernetes.io/projected/043bef59-2e05-4a43-bb21-ca25e1ce483e-kube-api-access-vr2r9\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.840891 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bef59-2e05-4a43-bb21-ca25e1ce483e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.841052 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkkkg" event={"ID":"043bef59-2e05-4a43-bb21-ca25e1ce483e","Type":"ContainerDied","Data":"23262052cd9543ba6fa051ae4effd6c2b402ac9b81e048761755c6527b5bff03"} Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.841097 4909 scope.go:117] "RemoveContainer" containerID="99d448c89de192ea22caca87a869c12198cd34e53dd2e43efe188b369a339ecc" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.841240 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkkkg" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.871080 4909 scope.go:117] "RemoveContainer" containerID="ad0b71c5b3c3803e1d6a32aceddf0d0a6df20577d7d3c8339ef2d3a693cde95e" Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.879054 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"] Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.890069 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkkkg"] Nov 28 17:56:24 crc kubenswrapper[4909]: I1128 17:56:24.900802 4909 scope.go:117] "RemoveContainer" containerID="b1e9a6354c90cc9f0f5aafae4c1795dfed3fd7fbf0a6ab0670521184f3b9b918" Nov 28 17:56:25 crc kubenswrapper[4909]: I1128 17:56:25.918886 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" path="/var/lib/kubelet/pods/043bef59-2e05-4a43-bb21-ca25e1ce483e/volumes" Nov 28 17:56:26 crc kubenswrapper[4909]: I1128 17:56:26.995883 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56b94bdf77-gbr2l" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.058755 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"] Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.059011 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="dnsmasq-dns" containerID="cri-o://c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4" gracePeriod=10 Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.602043 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.698505 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.698740 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.698784 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97sqh\" (UniqueName: \"kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.698824 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.698893 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.699025 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb\") pod \"451be04f-ff1a-4075-b229-3a662fcb67bd\" (UID: \"451be04f-ff1a-4075-b229-3a662fcb67bd\") " Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.707289 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh" (OuterVolumeSpecName: "kube-api-access-97sqh") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "kube-api-access-97sqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.764190 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.764471 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.766587 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.771618 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config" (OuterVolumeSpecName: "config") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.781865 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "451be04f-ff1a-4075-b229-3a662fcb67bd" (UID: "451be04f-ff1a-4075-b229-3a662fcb67bd"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801092 4909 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801365 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97sqh\" (UniqueName: \"kubernetes.io/projected/451be04f-ff1a-4075-b229-3a662fcb67bd-kube-api-access-97sqh\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801430 4909 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801557 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801631 4909 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.801724 4909 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/451be04f-ff1a-4075-b229-3a662fcb67bd-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.884431 4909 generic.go:334] "Generic (PLEG): container finished" podID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerID="c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4" exitCode=0 Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.884494 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" event={"ID":"451be04f-ff1a-4075-b229-3a662fcb67bd","Type":"ContainerDied","Data":"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4"} Nov 28 17:56:27 crc 
kubenswrapper[4909]: I1128 17:56:27.884866 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" event={"ID":"451be04f-ff1a-4075-b229-3a662fcb67bd","Type":"ContainerDied","Data":"b525816d9b6067fc02a3790e3c6a8c2a41a8d101ec6c3c7ef5d9a4e595d94f97"} Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.884513 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b59fc6c5-jvnfr" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.884915 4909 scope.go:117] "RemoveContainer" containerID="c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.936504 4909 scope.go:117] "RemoveContainer" containerID="10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.937686 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"] Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.948356 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86b59fc6c5-jvnfr"] Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.958468 4909 scope.go:117] "RemoveContainer" containerID="c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4" Nov 28 17:56:27 crc kubenswrapper[4909]: E1128 17:56:27.958945 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4\": container with ID starting with c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4 not found: ID does not exist" containerID="c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.958982 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4"} err="failed to get container status \"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4\": rpc error: code = NotFound desc = could not find container \"c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4\": container with ID starting with c60955bda35ca95f1ba0ed067c4a2b7956d82fbbfd8aa51b4d6ef246e8906bf4 not found: ID does not exist" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.959007 4909 scope.go:117] "RemoveContainer" containerID="10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea" Nov 28 17:56:27 crc kubenswrapper[4909]: E1128 17:56:27.959359 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea\": container with ID starting with 10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea not found: ID does not exist" containerID="10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea" Nov 28 17:56:27 crc kubenswrapper[4909]: I1128 17:56:27.959394 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea"} err="failed to get container status \"10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea\": rpc error: code = NotFound desc = could not find container \"10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea\": container with ID starting with 
10e6e22ce5fa799a18be5ca6ca0195784e749bcef811c9375eff76728c39cfea not found: ID does not exist" Nov 28 17:56:29 crc kubenswrapper[4909]: I1128 17:56:29.916747 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" path="/var/lib/kubelet/pods/451be04f-ff1a-4075-b229-3a662fcb67bd/volumes" Nov 28 17:56:35 crc kubenswrapper[4909]: I1128 17:56:35.923727 4909 scope.go:117] "RemoveContainer" containerID="d920b9a243174ad11b0fa447095605f8c457c29b2ed5908c15d82acc47d5f07f" Nov 28 17:56:35 crc kubenswrapper[4909]: I1128 17:56:35.952749 4909 scope.go:117] "RemoveContainer" containerID="d6e386292cc7e130a2615622530583498f2425035b61655d00b32ecc1a026b0d" Nov 28 17:56:36 crc kubenswrapper[4909]: I1128 17:56:36.020562 4909 scope.go:117] "RemoveContainer" containerID="ef877dfe87c9b17c27387bc59b869068bf1fd5496a499fbb12b09e82056e5efb" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.955754 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l"] Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958066 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958186 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958349 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="init" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958455 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="init" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958570 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="extract-utilities" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958631 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="extract-utilities" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958671 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="init" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958680 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="init" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958690 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="registry-server" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958696 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="registry-server" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958711 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958719 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: E1128 17:56:38.958736 4909 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="extract-content" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.958742 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="extract-content" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.959262 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="451be04f-ff1a-4075-b229-3a662fcb67bd" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.959370 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="043bef59-2e05-4a43-bb21-ca25e1ce483e" containerName="registry-server" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.959455 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="06eb2b90-11b5-4a78-b602-d4d2a9c1ad3c" containerName="dnsmasq-dns" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.960609 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.963381 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.963418 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.963394 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.963449 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:56:38 crc kubenswrapper[4909]: I1128 17:56:38.967618 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l"] Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.057318 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.057645 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.057805 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-498pv\" (UniqueName: \"kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.057948 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.058053 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.160547 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.160717 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.160767 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-498pv\" (UniqueName: \"kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.160818 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.160844 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.166067 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.167185 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.167340 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.168139 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.176292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-498pv\" (UniqueName: \"kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.290189 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:56:39 crc kubenswrapper[4909]: I1128 17:56:39.889803 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l"] Nov 28 17:56:39 crc kubenswrapper[4909]: W1128 17:56:39.893798 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e07ff4b_3800_4921_83fb_4b1da482a8b0.slice/crio-2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76 WatchSource:0}: Error finding container 2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76: Status 404 returned error can't find the container with id 2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76 Nov 28 17:56:40 crc kubenswrapper[4909]: I1128 17:56:40.025288 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" event={"ID":"2e07ff4b-3800-4921-83fb-4b1da482a8b0","Type":"ContainerStarted","Data":"2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76"} Nov 28 17:56:51 crc kubenswrapper[4909]: I1128 17:56:51.173896 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" event={"ID":"2e07ff4b-3800-4921-83fb-4b1da482a8b0","Type":"ContainerStarted","Data":"6645ea44396162b18ff200f40d93ac27857b2e9b3efe88c885e9a311a2094fd3"} Nov 28 17:56:51 crc kubenswrapper[4909]: I1128 17:56:51.210885 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" podStartSLOduration=2.404525282 podStartE2EDuration="13.210864319s" podCreationTimestamp="2025-11-28 17:56:38 +0000 UTC" firstStartedPulling="2025-11-28 17:56:39.895841755 +0000 UTC m=+6382.292526279" lastFinishedPulling="2025-11-28 17:56:50.702180772 +0000 UTC m=+6393.098865316" observedRunningTime="2025-11-28 17:56:51.200293617 +0000 UTC m=+6393.596978181" watchObservedRunningTime="2025-11-28 17:56:51.210864319 +0000 UTC m=+6393.607548863" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.581387 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4pvmc"] Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.584889 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.591855 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"] Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.597190 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gctmf\" (UniqueName: \"kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.599269 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.599374 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.700988 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.701154 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gctmf\" (UniqueName: \"kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.701271 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.701808 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.702056 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.724029 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gctmf\" (UniqueName: \"kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf\") pod \"community-operators-4pvmc\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") " pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:57 crc kubenswrapper[4909]: I1128 17:56:57.910328 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:56:58 crc kubenswrapper[4909]: I1128 17:56:58.469236 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"] Nov 28 17:56:59 crc kubenswrapper[4909]: I1128 17:56:59.330207 4909 generic.go:334] "Generic (PLEG): container finished" podID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerID="c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c" exitCode=0 Nov 28 17:56:59 crc kubenswrapper[4909]: I1128 17:56:59.330514 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerDied","Data":"c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c"} Nov 28 17:56:59 crc kubenswrapper[4909]: I1128 17:56:59.330547 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerStarted","Data":"62032f45f1bb6baa9c4760df6b16a9a571bcc913b53e1bfba10a96b1b005e76f"} Nov 28 17:56:59 crc kubenswrapper[4909]: I1128 17:56:59.354178 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:57:01 crc kubenswrapper[4909]: I1128 17:57:01.354973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerStarted","Data":"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"} Nov 28 17:57:02 crc kubenswrapper[4909]: I1128 17:57:02.369479 4909 generic.go:334] "Generic (PLEG): container finished" podID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerID="9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b" exitCode=0 Nov 28 17:57:02 crc kubenswrapper[4909]: I1128 17:57:02.369522 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerDied","Data":"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"} Nov 28 17:57:05 crc kubenswrapper[4909]: I1128 17:57:05.410095 4909 generic.go:334] "Generic (PLEG): container finished" podID="2e07ff4b-3800-4921-83fb-4b1da482a8b0" containerID="6645ea44396162b18ff200f40d93ac27857b2e9b3efe88c885e9a311a2094fd3" exitCode=0 Nov 28 17:57:05 crc kubenswrapper[4909]: I1128 17:57:05.410184 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" event={"ID":"2e07ff4b-3800-4921-83fb-4b1da482a8b0","Type":"ContainerDied","Data":"6645ea44396162b18ff200f40d93ac27857b2e9b3efe88c885e9a311a2094fd3"} Nov 28 17:57:05 crc kubenswrapper[4909]: I1128 17:57:05.414217 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" 
event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerStarted","Data":"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"} Nov 28 17:57:05 crc kubenswrapper[4909]: I1128 17:57:05.452966 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4pvmc" podStartSLOduration=3.553662691 podStartE2EDuration="8.452943951s" podCreationTimestamp="2025-11-28 17:56:57 +0000 UTC" firstStartedPulling="2025-11-28 17:56:59.353889703 +0000 UTC m=+6401.750574237" lastFinishedPulling="2025-11-28 17:57:04.253170963 +0000 UTC m=+6406.649855497" observedRunningTime="2025-11-28 17:57:05.450697631 +0000 UTC m=+6407.847382185" watchObservedRunningTime="2025-11-28 17:57:05.452943951 +0000 UTC m=+6407.849628475" Nov 28 17:57:06 crc kubenswrapper[4909]: I1128 17:57:06.902614 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.018135 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle\") pod \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.018507 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph\") pod \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.018793 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory\") pod \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.018960 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-498pv\" (UniqueName: \"kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv\") pod \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.019124 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key\") pod \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\" (UID: \"2e07ff4b-3800-4921-83fb-4b1da482a8b0\") " Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.024550 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph" (OuterVolumeSpecName: "ceph") pod "2e07ff4b-3800-4921-83fb-4b1da482a8b0" (UID: "2e07ff4b-3800-4921-83fb-4b1da482a8b0"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.025166 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv" (OuterVolumeSpecName: "kube-api-access-498pv") pod "2e07ff4b-3800-4921-83fb-4b1da482a8b0" (UID: "2e07ff4b-3800-4921-83fb-4b1da482a8b0"). InnerVolumeSpecName "kube-api-access-498pv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.025244 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "2e07ff4b-3800-4921-83fb-4b1da482a8b0" (UID: "2e07ff4b-3800-4921-83fb-4b1da482a8b0"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.049180 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory" (OuterVolumeSpecName: "inventory") pod "2e07ff4b-3800-4921-83fb-4b1da482a8b0" (UID: "2e07ff4b-3800-4921-83fb-4b1da482a8b0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.061210 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2e07ff4b-3800-4921-83fb-4b1da482a8b0" (UID: "2e07ff4b-3800-4921-83fb-4b1da482a8b0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.122800 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-498pv\" (UniqueName: \"kubernetes.io/projected/2e07ff4b-3800-4921-83fb-4b1da482a8b0-kube-api-access-498pv\") on node \"crc\" DevicePath \"\"" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.122828 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.122838 4909 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.122850 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.122861 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e07ff4b-3800-4921-83fb-4b1da482a8b0-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.438158 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" event={"ID":"2e07ff4b-3800-4921-83fb-4b1da482a8b0","Type":"ContainerDied","Data":"2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76"} Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.438710 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c6cb44b3f0a4216ad396b415b847134f650de948b2ec04c6c1bc701d3618b76" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.438250 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.921269 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:57:07 crc kubenswrapper[4909]: I1128 17:57:07.921322 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:57:08 crc kubenswrapper[4909]: I1128 17:57:08.006874 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.041838 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w"] Nov 28 17:57:11 crc kubenswrapper[4909]: E1128 17:57:11.048558 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e07ff4b-3800-4921-83fb-4b1da482a8b0" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.048579 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e07ff4b-3800-4921-83fb-4b1da482a8b0" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.048809 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e07ff4b-3800-4921-83fb-4b1da482a8b0" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.049655 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.052141 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.052255 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.052346 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.052471 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.054675 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w"] Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.111688 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.111748 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx7j4\" (UniqueName: \"kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " 
pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.111807 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.111839 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.111894 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.213869 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.213905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx7j4\" (UniqueName: \"kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.213956 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.213988 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.214038 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: 
\"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.220397 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.220425 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.220779 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.221932 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.229162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx7j4\" (UniqueName: \"kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.384501 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 17:57:11 crc kubenswrapper[4909]: I1128 17:57:11.947509 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w"] Nov 28 17:57:12 crc kubenswrapper[4909]: I1128 17:57:12.507258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" event={"ID":"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54","Type":"ContainerStarted","Data":"382fbb89f1c3eabb1c8afebc3a8baf3038676fe23ed334c6d0f71f78df45c77f"} Nov 28 17:57:13 crc kubenswrapper[4909]: I1128 17:57:13.518770 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" event={"ID":"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54","Type":"ContainerStarted","Data":"d8dac42ad92690806ae1f9760a7601f7b395fb7b18f0aae62af5819bd276aaf5"} Nov 28 17:57:13 crc kubenswrapper[4909]: I1128 17:57:13.541085 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" podStartSLOduration=1.6484365730000001 podStartE2EDuration="2.541065171s" podCreationTimestamp="2025-11-28 17:57:11 +0000 UTC" firstStartedPulling="2025-11-28 17:57:11.946175343 +0000 UTC m=+6414.342859867" lastFinishedPulling="2025-11-28 17:57:12.838803941 +0000 UTC m=+6415.235488465" observedRunningTime="2025-11-28 17:57:13.535172514 +0000 UTC m=+6415.931857078" watchObservedRunningTime="2025-11-28 17:57:13.541065171 +0000 UTC m=+6415.937749705" Nov 28 17:57:17 crc kubenswrapper[4909]: I1128 17:57:17.974106 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4pvmc" Nov 28 17:57:18 crc kubenswrapper[4909]: I1128 17:57:18.023138 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"] Nov 28 17:57:18 crc kubenswrapper[4909]: I1128 17:57:18.592292 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4pvmc" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="registry-server" containerID="cri-o://616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360" gracePeriod=2 Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.205793 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 17:57:17 crc kubenswrapper[4909]: I1128 17:57:17.974106 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4pvmc"
Nov 28 17:57:18 crc kubenswrapper[4909]: I1128 17:57:18.023138 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"]
Nov 28 17:57:18 crc kubenswrapper[4909]: I1128 17:57:18.592292 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4pvmc" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="registry-server" containerID="cri-o://616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360" gracePeriod=2
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.205793 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvmc"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.390574 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gctmf\" (UniqueName: \"kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf\") pod \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") "
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.390776 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content\") pod \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") "
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.390973 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities\") pod \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\" (UID: \"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86\") "
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.392102 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities" (OuterVolumeSpecName: "utilities") pod "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" (UID: "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.398502 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf" (OuterVolumeSpecName: "kube-api-access-gctmf") pod "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" (UID: "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86"). InnerVolumeSpecName "kube-api-access-gctmf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.452648 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" (UID: "0511af05-1d6c-43eb-88a0-7fe5bd8b7c86"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.493750 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.493792 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gctmf\" (UniqueName: \"kubernetes.io/projected/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-kube-api-access-gctmf\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.493804 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.607386 4909 generic.go:334] "Generic (PLEG): container finished" podID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerID="616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360" exitCode=0
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.607438 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerDied","Data":"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"}
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.607519 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4pvmc" event={"ID":"0511af05-1d6c-43eb-88a0-7fe5bd8b7c86","Type":"ContainerDied","Data":"62032f45f1bb6baa9c4760df6b16a9a571bcc913b53e1bfba10a96b1b005e76f"}
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.607538 4909 scope.go:117] "RemoveContainer" containerID="616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.607572 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4pvmc"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.633165 4909 scope.go:117] "RemoveContainer" containerID="9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.678443 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"]
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.685977 4909 scope.go:117] "RemoveContainer" containerID="c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.688144 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4pvmc"]
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.728473 4909 scope.go:117] "RemoveContainer" containerID="616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"
Nov 28 17:57:19 crc kubenswrapper[4909]: E1128 17:57:19.729123 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360\": container with ID starting with 616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360 not found: ID does not exist" containerID="616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.729168 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360"} err="failed to get container status \"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360\": rpc error: code = NotFound desc = could not find container \"616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360\": container with ID starting with 616f445341f951454fa598244d3775c767e95cdff0e76791897d0c706340e360 not found: ID does not exist"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.729203 4909 scope.go:117] "RemoveContainer" containerID="9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"
Nov 28 17:57:19 crc kubenswrapper[4909]: E1128 17:57:19.729595 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b\": container with ID starting with 9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b not found: ID does not exist" containerID="9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.729700 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b"} err="failed to get container status \"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b\": rpc error: code = NotFound desc = could not find container \"9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b\": container with ID starting with 9cd3d75eaa0a3a301376555f177e4b95c15aec8152be5a468cb7a0ac8355cb1b not found: ID does not exist"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.729728 4909 scope.go:117] "RemoveContainer" containerID="c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c"
Nov 28 17:57:19 crc kubenswrapper[4909]: E1128 17:57:19.730084 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c\": container with ID starting with c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c not found: ID does not exist" containerID="c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.730139 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c"} err="failed to get container status \"c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c\": rpc error: code = NotFound desc = could not find container \"c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c\": container with ID starting with c55a3728b37b651e77e25f254b70b01e5942270038fae445458c895964a7605c not found: ID does not exist"
Nov 28 17:57:19 crc kubenswrapper[4909]: I1128 17:57:19.915742 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" path="/var/lib/kubelet/pods/0511af05-1d6c-43eb-88a0-7fe5bd8b7c86/volumes"
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:58:49 crc kubenswrapper[4909]: I1128 17:58:49.915218 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 17:58:49 crc kubenswrapper[4909]: I1128 17:58:49.916222 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:58:49 crc kubenswrapper[4909]: I1128 17:58:49.916304 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc" gracePeriod=600 Nov 28 17:58:50 crc kubenswrapper[4909]: I1128 17:58:50.603308 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc" exitCode=0 Nov 28 17:58:50 crc kubenswrapper[4909]: I1128 17:58:50.603419 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc"} Nov 28 17:58:50 crc kubenswrapper[4909]: I1128 17:58:50.603703 4909 scope.go:117] "RemoveContainer" containerID="eb8a08a6c738fff0fcbfbb88427c9ed53477944abe7436212850e368ec229c4f" Nov 28 17:58:51 crc kubenswrapper[4909]: I1128 17:58:51.616254 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4"} Nov 28 17:59:00 crc kubenswrapper[4909]: I1128 17:59:00.188718 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-ml6s4"] Nov 28 17:59:00 crc kubenswrapper[4909]: I1128 17:59:00.198375 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-ml6s4"] Nov 28 17:59:01 crc kubenswrapper[4909]: I1128 17:59:01.046082 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-57ae-account-create-update-scp7k"] Nov 28 17:59:01 crc kubenswrapper[4909]: I1128 17:59:01.057071 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-57ae-account-create-update-scp7k"] Nov 28 17:59:01 crc kubenswrapper[4909]: I1128 17:59:01.912883 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21636d2d-fea0-4488-8ab1-9f5766ff35ef" path="/var/lib/kubelet/pods/21636d2d-fea0-4488-8ab1-9f5766ff35ef/volumes" Nov 28 17:59:01 crc kubenswrapper[4909]: I1128 17:59:01.914726 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60dc21da-1ae5-49b5-88d2-a8f7bd552cc2" 
path="/var/lib/kubelet/pods/60dc21da-1ae5-49b5-88d2-a8f7bd552cc2/volumes" Nov 28 17:59:07 crc kubenswrapper[4909]: I1128 17:59:07.029969 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-vgrrt"] Nov 28 17:59:07 crc kubenswrapper[4909]: I1128 17:59:07.047935 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-vgrrt"] Nov 28 17:59:07 crc kubenswrapper[4909]: I1128 17:59:07.916433 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50f7d8e1-a601-4fb0-894c-ceaeba78f7ea" path="/var/lib/kubelet/pods/50f7d8e1-a601-4fb0-894c-ceaeba78f7ea/volumes" Nov 28 17:59:08 crc kubenswrapper[4909]: I1128 17:59:08.055020 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-72f1-account-create-update-lkr8q"] Nov 28 17:59:08 crc kubenswrapper[4909]: I1128 17:59:08.064808 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-72f1-account-create-update-lkr8q"] Nov 28 17:59:09 crc kubenswrapper[4909]: I1128 17:59:09.915420 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0da79de-6f24-4b07-ab86-215469b6e162" path="/var/lib/kubelet/pods/b0da79de-6f24-4b07-ab86-215469b6e162/volumes" Nov 28 17:59:36 crc kubenswrapper[4909]: I1128 17:59:36.546563 4909 scope.go:117] "RemoveContainer" containerID="5c213a55efa3551857deb4916353d765fabd979bc4098942f0ee1721a2a3f6bb" Nov 28 17:59:36 crc kubenswrapper[4909]: I1128 17:59:36.604760 4909 scope.go:117] "RemoveContainer" containerID="36aed24fa244a6eff805b588671dd1bdf99853668507bdb37dc2cce9f6c1a619" Nov 28 17:59:36 crc kubenswrapper[4909]: I1128 17:59:36.663359 4909 scope.go:117] "RemoveContainer" containerID="3990e620e1d3ad8d3aec7f208b5119268a7365c254b48068f983d0c58090a93a" Nov 28 17:59:36 crc kubenswrapper[4909]: I1128 17:59:36.712280 4909 scope.go:117] "RemoveContainer" containerID="5027288c6f28cd10a4e0a3b8034e7deaa8cdaa1490b93d2bbf744db24f4e87d0" Nov 28 17:59:44 crc kubenswrapper[4909]: I1128 17:59:44.052999 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-vz6hh"] Nov 28 17:59:44 crc kubenswrapper[4909]: I1128 17:59:44.069816 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-vz6hh"] Nov 28 17:59:45 crc kubenswrapper[4909]: I1128 17:59:45.921508 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44ec722e-82ca-4adf-b196-c76a6d192768" path="/var/lib/kubelet/pods/44ec722e-82ca-4adf-b196-c76a6d192768/volumes" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.708152 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-52psx"] Nov 28 17:59:48 crc kubenswrapper[4909]: E1128 17:59:48.709401 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="extract-utilities" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.709423 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="extract-utilities" Nov 28 17:59:48 crc kubenswrapper[4909]: E1128 17:59:48.709455 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="extract-content" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.709469 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="extract-content" Nov 28 17:59:48 crc kubenswrapper[4909]: 
E1128 17:59:48.709515 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="registry-server" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.709527 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="registry-server" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.709976 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0511af05-1d6c-43eb-88a0-7fe5bd8b7c86" containerName="registry-server" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.713028 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.733729 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"] Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.792746 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnzs8\" (UniqueName: \"kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.792903 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.793184 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.895518 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.896292 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.896375 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.896579 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnzs8\" (UniqueName: 
\"kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.897097 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:48 crc kubenswrapper[4909]: I1128 17:59:48.917927 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnzs8\" (UniqueName: \"kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8\") pod \"redhat-operators-52psx\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") " pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:49 crc kubenswrapper[4909]: I1128 17:59:49.046231 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:49 crc kubenswrapper[4909]: I1128 17:59:49.544363 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"] Nov 28 17:59:50 crc kubenswrapper[4909]: I1128 17:59:50.285685 4909 generic.go:334] "Generic (PLEG): container finished" podID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerID="126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77" exitCode=0 Nov 28 17:59:50 crc kubenswrapper[4909]: I1128 17:59:50.285993 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerDied","Data":"126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77"} Nov 28 17:59:50 crc kubenswrapper[4909]: I1128 17:59:50.286023 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerStarted","Data":"af5ed070cfa6db48000cc59292ef2786080ce4a5c42d77a19eaa3f0bc3cc2882"} Nov 28 17:59:51 crc kubenswrapper[4909]: I1128 17:59:51.297566 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerStarted","Data":"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"} Nov 28 17:59:55 crc kubenswrapper[4909]: I1128 17:59:55.366790 4909 generic.go:334] "Generic (PLEG): container finished" podID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerID="c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42" exitCode=0 Nov 28 17:59:55 crc kubenswrapper[4909]: I1128 17:59:55.366858 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerDied","Data":"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"} Nov 28 17:59:56 crc kubenswrapper[4909]: I1128 17:59:56.381161 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerStarted","Data":"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"} Nov 28 17:59:59 crc kubenswrapper[4909]: I1128 17:59:59.047701 4909 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 17:59:59 crc kubenswrapper[4909]: I1128 17:59:59.049077 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.093704 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-52psx" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server" probeResult="failure" output=< Nov 28 18:00:00 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 18:00:00 crc kubenswrapper[4909]: > Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.157987 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-52psx" podStartSLOduration=6.472663602 podStartE2EDuration="12.157960687s" podCreationTimestamp="2025-11-28 17:59:48 +0000 UTC" firstStartedPulling="2025-11-28 17:59:50.288596285 +0000 UTC m=+6572.685280849" lastFinishedPulling="2025-11-28 17:59:55.97389341 +0000 UTC m=+6578.370577934" observedRunningTime="2025-11-28 17:59:56.401997873 +0000 UTC m=+6578.798682437" watchObservedRunningTime="2025-11-28 18:00:00.157960687 +0000 UTC m=+6582.554645221" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.172360 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7"] Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.174114 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.176093 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.176436 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.179930 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.180000 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm8l2\" (UniqueName: \"kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.180074 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.188472 4909 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7"] Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.282391 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.282488 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm8l2\" (UniqueName: \"kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.282532 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.283508 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.291145 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.312102 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm8l2\" (UniqueName: \"kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2\") pod \"collect-profiles-29405880-fv5k7\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:00 crc kubenswrapper[4909]: I1128 18:00:00.503043 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:01 crc kubenswrapper[4909]: I1128 18:00:01.068809 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7"] Nov 28 18:00:01 crc kubenswrapper[4909]: I1128 18:00:01.426554 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" event={"ID":"0fd47769-1328-495f-aa2d-3e15a9df114d","Type":"ContainerStarted","Data":"b26a761f7f99192d97c3db332338c7cb0e6fa7201dcb4944384a020921cdcc36"} Nov 28 18:00:01 crc kubenswrapper[4909]: I1128 18:00:01.426602 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" event={"ID":"0fd47769-1328-495f-aa2d-3e15a9df114d","Type":"ContainerStarted","Data":"d8163fef51eafd84180940e566d0094a65013cad06557ddb73b8026d55ae801e"} Nov 28 18:00:01 crc kubenswrapper[4909]: I1128 18:00:01.446153 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" podStartSLOduration=1.446130522 podStartE2EDuration="1.446130522s" podCreationTimestamp="2025-11-28 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:00:01.440153983 +0000 UTC m=+6583.836838507" watchObservedRunningTime="2025-11-28 18:00:01.446130522 +0000 UTC m=+6583.842815046" Nov 28 18:00:02 crc kubenswrapper[4909]: I1128 18:00:02.439806 4909 generic.go:334] "Generic (PLEG): container finished" podID="0fd47769-1328-495f-aa2d-3e15a9df114d" containerID="b26a761f7f99192d97c3db332338c7cb0e6fa7201dcb4944384a020921cdcc36" exitCode=0 Nov 28 18:00:02 crc kubenswrapper[4909]: I1128 18:00:02.439892 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" event={"ID":"0fd47769-1328-495f-aa2d-3e15a9df114d","Type":"ContainerDied","Data":"b26a761f7f99192d97c3db332338c7cb0e6fa7201dcb4944384a020921cdcc36"} Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.844515 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.958345 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm8l2\" (UniqueName: \"kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2\") pod \"0fd47769-1328-495f-aa2d-3e15a9df114d\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.958417 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume\") pod \"0fd47769-1328-495f-aa2d-3e15a9df114d\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.959103 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume\") pod \"0fd47769-1328-495f-aa2d-3e15a9df114d\" (UID: \"0fd47769-1328-495f-aa2d-3e15a9df114d\") " Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.959586 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume" (OuterVolumeSpecName: "config-volume") pod "0fd47769-1328-495f-aa2d-3e15a9df114d" (UID: "0fd47769-1328-495f-aa2d-3e15a9df114d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.959975 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fd47769-1328-495f-aa2d-3e15a9df114d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.963792 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2" (OuterVolumeSpecName: "kube-api-access-wm8l2") pod "0fd47769-1328-495f-aa2d-3e15a9df114d" (UID: "0fd47769-1328-495f-aa2d-3e15a9df114d"). InnerVolumeSpecName "kube-api-access-wm8l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:00:03 crc kubenswrapper[4909]: I1128 18:00:03.968605 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0fd47769-1328-495f-aa2d-3e15a9df114d" (UID: "0fd47769-1328-495f-aa2d-3e15a9df114d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.062291 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm8l2\" (UniqueName: \"kubernetes.io/projected/0fd47769-1328-495f-aa2d-3e15a9df114d-kube-api-access-wm8l2\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.062332 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fd47769-1328-495f-aa2d-3e15a9df114d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.462570 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" event={"ID":"0fd47769-1328-495f-aa2d-3e15a9df114d","Type":"ContainerDied","Data":"d8163fef51eafd84180940e566d0094a65013cad06557ddb73b8026d55ae801e"} Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.462953 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8163fef51eafd84180940e566d0094a65013cad06557ddb73b8026d55ae801e" Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.462755 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7" Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.508679 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk"] Nov 28 18:00:04 crc kubenswrapper[4909]: I1128 18:00:04.521605 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-8jtkk"] Nov 28 18:00:05 crc kubenswrapper[4909]: I1128 18:00:05.915642 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f" path="/var/lib/kubelet/pods/2cb2a5f4-dd73-41dc-a42e-c7cd82bfd60f/volumes" Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.104496 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.165017 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.342841 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"] Nov 28 18:00:10 crc kubenswrapper[4909]: I1128 18:00:10.533695 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-52psx" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server" containerID="cri-o://2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6" gracePeriod=2 Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.498761 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-52psx" Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.549955 4909 generic.go:334] "Generic (PLEG): container finished" podID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerID="2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6" exitCode=0 Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.550025 4909 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.104496 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-52psx"
Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.165017 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-52psx"
Nov 28 18:00:09 crc kubenswrapper[4909]: I1128 18:00:09.342841 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"]
Nov 28 18:00:10 crc kubenswrapper[4909]: I1128 18:00:10.533695 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-52psx" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server" containerID="cri-o://2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6" gracePeriod=2
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.498761 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-52psx"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.549955 4909 generic.go:334] "Generic (PLEG): container finished" podID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerID="2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6" exitCode=0
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.550025 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-52psx"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.550021 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerDied","Data":"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"}
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.550097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-52psx" event={"ID":"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85","Type":"ContainerDied","Data":"af5ed070cfa6db48000cc59292ef2786080ce4a5c42d77a19eaa3f0bc3cc2882"}
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.550122 4909 scope.go:117] "RemoveContainer" containerID="2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.573199 4909 scope.go:117] "RemoveContainer" containerID="c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.594201 4909 scope.go:117] "RemoveContainer" containerID="126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.625633 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities\") pod \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") "
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.625891 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content\") pod \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") "
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.625944 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnzs8\" (UniqueName: \"kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8\") pod \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\" (UID: \"64cdef72-7f9d-4c88-98f5-d06bf3dbfb85\") "
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.628555 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities" (OuterVolumeSpecName: "utilities") pod "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" (UID: "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.632857 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8" (OuterVolumeSpecName: "kube-api-access-qnzs8") pod "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" (UID: "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85"). InnerVolumeSpecName "kube-api-access-qnzs8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.647892 4909 scope.go:117] "RemoveContainer" containerID="2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"
Nov 28 18:00:11 crc kubenswrapper[4909]: E1128 18:00:11.648305 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6\": container with ID starting with 2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6 not found: ID does not exist" containerID="2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.648363 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6"} err="failed to get container status \"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6\": rpc error: code = NotFound desc = could not find container \"2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6\": container with ID starting with 2689fc461531047d4477c867026431faac6d905cc3b90e8b12fa91ed067575f6 not found: ID does not exist"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.648396 4909 scope.go:117] "RemoveContainer" containerID="c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"
Nov 28 18:00:11 crc kubenswrapper[4909]: E1128 18:00:11.648777 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42\": container with ID starting with c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42 not found: ID does not exist" containerID="c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.648819 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42"} err="failed to get container status \"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42\": rpc error: code = NotFound desc = could not find container \"c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42\": container with ID starting with c1154656e2454d7f5062fad30431e4ebc5fff12187787846d9494a999d740b42 not found: ID does not exist"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.648843 4909 scope.go:117] "RemoveContainer" containerID="126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77"
Nov 28 18:00:11 crc kubenswrapper[4909]: E1128 18:00:11.649364 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77\": container with ID starting with 126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77 not found: ID does not exist" containerID="126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.649402 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77"} err="failed to get container status \"126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77\": rpc error: code = NotFound desc = could not find container \"126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77\": container with ID starting with 126963b4c51e00733a589380c5b1a41d829f9aa90647be2bcf23cf63e4f18f77 not found: ID does not exist"
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.729148 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.729178 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnzs8\" (UniqueName: \"kubernetes.io/projected/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-kube-api-access-qnzs8\") on node \"crc\" DevicePath \"\""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.734962 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" (UID: "64cdef72-7f9d-4c88-98f5-d06bf3dbfb85"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.830838 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.898490 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"]
Nov 28 18:00:11 crc kubenswrapper[4909]: I1128 18:00:11.914159 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-52psx"]
Nov 28 18:00:13 crc kubenswrapper[4909]: I1128 18:00:13.923964 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" path="/var/lib/kubelet/pods/64cdef72-7f9d-4c88-98f5-d06bf3dbfb85/volumes"
Nov 28 18:00:36 crc kubenswrapper[4909]: I1128 18:00:36.845635 4909 scope.go:117] "RemoveContainer" containerID="fa938d3befb77f50b7d816f18ba395a3c8ccbbf9a17492a0bc474afee0aa4f28"
Nov 28 18:00:36 crc kubenswrapper[4909]: I1128 18:00:36.887241 4909 scope.go:117] "RemoveContainer" containerID="6b4711ed370ed0e1dc30137af9f9ca29cc9903c95f654f9237ac5085edce7b96"
Nov 28 18:00:36 crc kubenswrapper[4909]: I1128 18:00:36.975308 4909 scope.go:117] "RemoveContainer" containerID="638693064e9a609ee3d18c4a13259d60f77284b9b9f8f977ac1a8aa09ca862dd"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.163612 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405881-hg7qc"]
Nov 28 18:01:00 crc kubenswrapper[4909]: E1128 18:01:00.164711 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="extract-utilities"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.164727 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="extract-utilities"
Nov 28 18:01:00 crc kubenswrapper[4909]: E1128 18:01:00.164759 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="extract-content"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.164766 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="extract-content"
Nov 28 18:01:00 crc kubenswrapper[4909]: E1128 18:01:00.164784 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.164789 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server"
Nov 28 18:01:00 crc kubenswrapper[4909]: E1128 18:01:00.164805 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fd47769-1328-495f-aa2d-3e15a9df114d" containerName="collect-profiles"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.164810 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fd47769-1328-495f-aa2d-3e15a9df114d" containerName="collect-profiles"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.165033 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="64cdef72-7f9d-4c88-98f5-d06bf3dbfb85" containerName="registry-server"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.165047 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fd47769-1328-495f-aa2d-3e15a9df114d" containerName="collect-profiles"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.165871 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.185882 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405881-hg7qc"]
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.282956 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.283125 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6kmr\" (UniqueName: \"kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.283160 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.283259 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.384609 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.384785 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6kmr\" (UniqueName: \"kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.384814 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.384891 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.391363 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.391710 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.394568 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.404881 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6kmr\" (UniqueName: \"kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr\") pod \"keystone-cron-29405881-hg7qc\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " pod="openstack/keystone-cron-29405881-hg7qc"
Need to start a new one" pod="openstack/keystone-cron-29405881-hg7qc" Nov 28 18:01:00 crc kubenswrapper[4909]: I1128 18:01:00.966537 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405881-hg7qc"] Nov 28 18:01:00 crc kubenswrapper[4909]: W1128 18:01:00.973334 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod187d1121_6730_42b2_8233_4f050d18f92b.slice/crio-0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910 WatchSource:0}: Error finding container 0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910: Status 404 returned error can't find the container with id 0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910 Nov 28 18:01:01 crc kubenswrapper[4909]: I1128 18:01:01.144103 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-hg7qc" event={"ID":"187d1121-6730-42b2-8233-4f050d18f92b","Type":"ContainerStarted","Data":"0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910"} Nov 28 18:01:02 crc kubenswrapper[4909]: I1128 18:01:02.155399 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-hg7qc" event={"ID":"187d1121-6730-42b2-8233-4f050d18f92b","Type":"ContainerStarted","Data":"ecec5956a7cc9dfe41460b4e67bf89e597f8c281b6c2c26edbf780b3b027dfe0"} Nov 28 18:01:02 crc kubenswrapper[4909]: I1128 18:01:02.176017 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405881-hg7qc" podStartSLOduration=2.175992331 podStartE2EDuration="2.175992331s" podCreationTimestamp="2025-11-28 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:01:02.172023115 +0000 UTC m=+6644.568707679" watchObservedRunningTime="2025-11-28 18:01:02.175992331 +0000 UTC m=+6644.572676875" Nov 28 18:01:04 crc kubenswrapper[4909]: I1128 18:01:04.177534 4909 generic.go:334] "Generic (PLEG): container finished" podID="187d1121-6730-42b2-8233-4f050d18f92b" containerID="ecec5956a7cc9dfe41460b4e67bf89e597f8c281b6c2c26edbf780b3b027dfe0" exitCode=0 Nov 28 18:01:04 crc kubenswrapper[4909]: I1128 18:01:04.177642 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-hg7qc" event={"ID":"187d1121-6730-42b2-8233-4f050d18f92b","Type":"ContainerDied","Data":"ecec5956a7cc9dfe41460b4e67bf89e597f8c281b6c2c26edbf780b3b027dfe0"} Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.614998 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405881-hg7qc" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.624518 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6kmr\" (UniqueName: \"kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr\") pod \"187d1121-6730-42b2-8233-4f050d18f92b\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.625103 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys\") pod \"187d1121-6730-42b2-8233-4f050d18f92b\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.625938 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data\") pod \"187d1121-6730-42b2-8233-4f050d18f92b\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.627087 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle\") pod \"187d1121-6730-42b2-8233-4f050d18f92b\" (UID: \"187d1121-6730-42b2-8233-4f050d18f92b\") " Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.631091 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr" (OuterVolumeSpecName: "kube-api-access-q6kmr") pod "187d1121-6730-42b2-8233-4f050d18f92b" (UID: "187d1121-6730-42b2-8233-4f050d18f92b"). InnerVolumeSpecName "kube-api-access-q6kmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.635526 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "187d1121-6730-42b2-8233-4f050d18f92b" (UID: "187d1121-6730-42b2-8233-4f050d18f92b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.670055 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "187d1121-6730-42b2-8233-4f050d18f92b" (UID: "187d1121-6730-42b2-8233-4f050d18f92b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.708864 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data" (OuterVolumeSpecName: "config-data") pod "187d1121-6730-42b2-8233-4f050d18f92b" (UID: "187d1121-6730-42b2-8233-4f050d18f92b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.730353 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.730540 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.730634 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187d1121-6730-42b2-8233-4f050d18f92b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:05 crc kubenswrapper[4909]: I1128 18:01:05.730733 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6kmr\" (UniqueName: \"kubernetes.io/projected/187d1121-6730-42b2-8233-4f050d18f92b-kube-api-access-q6kmr\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:06 crc kubenswrapper[4909]: I1128 18:01:06.209306 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-hg7qc" event={"ID":"187d1121-6730-42b2-8233-4f050d18f92b","Type":"ContainerDied","Data":"0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910"} Nov 28 18:01:06 crc kubenswrapper[4909]: I1128 18:01:06.209360 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c259b025a550526ef295fe5c9edad8e14b310fb9ce38b6d5f7f695b5905f910" Nov 28 18:01:06 crc kubenswrapper[4909]: I1128 18:01:06.209386 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405881-hg7qc" Nov 28 18:01:19 crc kubenswrapper[4909]: I1128 18:01:19.911651 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:01:19 crc kubenswrapper[4909]: I1128 18:01:19.912741 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 18:01:45.066979 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-9063-account-create-update-j27rx"] Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 18:01:45.080126 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-gp9f5"] Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 18:01:45.089468 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-gp9f5"] Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 18:01:45.099106 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-9063-account-create-update-j27rx"] Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 18:01:45.914621 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a2be1c1-d2d5-4683-b57f-6be731743630" path="/var/lib/kubelet/pods/0a2be1c1-d2d5-4683-b57f-6be731743630/volumes" Nov 28 18:01:45 crc kubenswrapper[4909]: I1128 
18:01:45.915928 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d62d869-b3e1-417f-b946-587d26d6549c" path="/var/lib/kubelet/pods/0d62d869-b3e1-417f-b946-587d26d6549c/volumes" Nov 28 18:01:49 crc kubenswrapper[4909]: I1128 18:01:49.910511 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:01:49 crc kubenswrapper[4909]: I1128 18:01:49.911229 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:01:59 crc kubenswrapper[4909]: I1128 18:01:59.045259 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-rgqnh"] Nov 28 18:01:59 crc kubenswrapper[4909]: I1128 18:01:59.053570 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-rgqnh"] Nov 28 18:01:59 crc kubenswrapper[4909]: I1128 18:01:59.913949 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8051d548-77d4-4c16-be81-d2f97bacca54" path="/var/lib/kubelet/pods/8051d548-77d4-4c16-be81-d2f97bacca54/volumes" Nov 28 18:02:19 crc kubenswrapper[4909]: I1128 18:02:19.910560 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:02:19 crc kubenswrapper[4909]: I1128 18:02:19.911156 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:02:19 crc kubenswrapper[4909]: I1128 18:02:19.920385 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:02:19 crc kubenswrapper[4909]: I1128 18:02:19.921252 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:02:19 crc kubenswrapper[4909]: I1128 18:02:19.921316 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" gracePeriod=600 Nov 28 18:02:20 crc kubenswrapper[4909]: E1128 18:02:20.066323 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:02:20 crc kubenswrapper[4909]: I1128 18:02:20.433977 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" exitCode=0 Nov 28 18:02:20 crc kubenswrapper[4909]: I1128 18:02:20.434031 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4"} Nov 28 18:02:20 crc kubenswrapper[4909]: I1128 18:02:20.434071 4909 scope.go:117] "RemoveContainer" containerID="ab68541ecdd49f3d48e7eee95d5d783096294e3e9a7f79df71710f3210660edc" Nov 28 18:02:20 crc kubenswrapper[4909]: I1128 18:02:20.434895 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:02:20 crc kubenswrapper[4909]: E1128 18:02:20.435280 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:02:33 crc kubenswrapper[4909]: I1128 18:02:33.902480 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:02:33 crc kubenswrapper[4909]: E1128 18:02:33.903525 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:02:37 crc kubenswrapper[4909]: I1128 18:02:37.124245 4909 scope.go:117] "RemoveContainer" containerID="2b6990b20d2a79caf67eb63869a8d96a3328d56573f8b5d96523553ad7d6ddf0" Nov 28 18:02:37 crc kubenswrapper[4909]: I1128 18:02:37.157773 4909 scope.go:117] "RemoveContainer" containerID="bb33086f8dbabc2ba18b8f027a4f4587a7ad592c030b15a0c74dd86c6ea691c6" Nov 28 18:02:37 crc kubenswrapper[4909]: I1128 18:02:37.217606 4909 scope.go:117] "RemoveContainer" containerID="3348844ce4b8fdf98c144937128d422c6411096d47ad8cc31301b703f9a5c148" Nov 28 18:02:46 crc kubenswrapper[4909]: I1128 18:02:46.901366 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:02:46 crc kubenswrapper[4909]: E1128 18:02:46.902314 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" 
podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:02:58 crc kubenswrapper[4909]: I1128 18:02:58.901757 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:02:58 crc kubenswrapper[4909]: E1128 18:02:58.902474 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:03:12 crc kubenswrapper[4909]: I1128 18:03:12.902748 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:03:12 crc kubenswrapper[4909]: E1128 18:03:12.905009 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:03:27 crc kubenswrapper[4909]: I1128 18:03:27.913784 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:03:27 crc kubenswrapper[4909]: E1128 18:03:27.914831 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:03:42 crc kubenswrapper[4909]: I1128 18:03:42.901339 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:03:42 crc kubenswrapper[4909]: E1128 18:03:42.902256 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:03:54 crc kubenswrapper[4909]: I1128 18:03:54.902136 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:03:54 crc kubenswrapper[4909]: E1128 18:03:54.902947 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:04:06 crc kubenswrapper[4909]: I1128 18:04:06.048594 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/aodh-f20d-account-create-update-dgwgg"] Nov 28 18:04:06 crc kubenswrapper[4909]: I1128 18:04:06.061516 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-njpg8"] Nov 28 18:04:06 crc kubenswrapper[4909]: I1128 18:04:06.073892 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-njpg8"] Nov 28 18:04:06 crc kubenswrapper[4909]: I1128 18:04:06.084871 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-f20d-account-create-update-dgwgg"] Nov 28 18:04:07 crc kubenswrapper[4909]: I1128 18:04:07.916098 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26c39cf0-5737-49da-bf23-62236e1b6926" path="/var/lib/kubelet/pods/26c39cf0-5737-49da-bf23-62236e1b6926/volumes" Nov 28 18:04:07 crc kubenswrapper[4909]: I1128 18:04:07.918967 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69754b0d-df57-4e81-b969-7b484861990c" path="/var/lib/kubelet/pods/69754b0d-df57-4e81-b969-7b484861990c/volumes" Nov 28 18:04:09 crc kubenswrapper[4909]: I1128 18:04:09.901838 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:04:09 crc kubenswrapper[4909]: E1128 18:04:09.903196 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:04:18 crc kubenswrapper[4909]: I1128 18:04:18.044108 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-lbjjd"] Nov 28 18:04:18 crc kubenswrapper[4909]: I1128 18:04:18.056146 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-lbjjd"] Nov 28 18:04:19 crc kubenswrapper[4909]: I1128 18:04:19.927818 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0404b029-383d-4300-a459-fe85d6e0509e" path="/var/lib/kubelet/pods/0404b029-383d-4300-a459-fe85d6e0509e/volumes" Nov 28 18:04:21 crc kubenswrapper[4909]: I1128 18:04:21.901599 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:04:21 crc kubenswrapper[4909]: E1128 18:04:21.902401 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:04:36 crc kubenswrapper[4909]: I1128 18:04:36.902146 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:04:36 crc kubenswrapper[4909]: E1128 18:04:36.904515 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:04:37 crc kubenswrapper[4909]: I1128 18:04:37.396317 4909 scope.go:117] "RemoveContainer" containerID="466d211ad07f41ac1df05bb5a2741ee033fe079aa964f7938758abedc216b7fd" Nov 28 18:04:37 crc kubenswrapper[4909]: I1128 18:04:37.448723 4909 scope.go:117] "RemoveContainer" containerID="77baa6b7715e3c053f1badb04f6ca480175cafdc7e941ad36f4ceffd0dc67a71" Nov 28 18:04:37 crc kubenswrapper[4909]: I1128 18:04:37.499582 4909 scope.go:117] "RemoveContainer" containerID="238214b8a53273a7f9f765651f3f44817c7ed73b37059b05d3566f86766ef766" Nov 28 18:04:40 crc kubenswrapper[4909]: I1128 18:04:40.033611 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-4ktws"] Nov 28 18:04:40 crc kubenswrapper[4909]: I1128 18:04:40.056822 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-4ktws"] Nov 28 18:04:41 crc kubenswrapper[4909]: I1128 18:04:41.036318 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-2f47-account-create-update-h849b"] Nov 28 18:04:41 crc kubenswrapper[4909]: I1128 18:04:41.050969 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-2f47-account-create-update-h849b"] Nov 28 18:04:41 crc kubenswrapper[4909]: I1128 18:04:41.919078 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dc1d1d5-b59f-4586-b96b-5ca2d78644c0" path="/var/lib/kubelet/pods/5dc1d1d5-b59f-4586-b96b-5ca2d78644c0/volumes" Nov 28 18:04:41 crc kubenswrapper[4909]: I1128 18:04:41.922206 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adf72195-c644-4f7d-9467-ade2c4096ed4" path="/var/lib/kubelet/pods/adf72195-c644-4f7d-9467-ade2c4096ed4/volumes" Nov 28 18:04:48 crc kubenswrapper[4909]: I1128 18:04:48.902208 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:04:48 crc kubenswrapper[4909]: E1128 18:04:48.903585 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:04:53 crc kubenswrapper[4909]: I1128 18:04:53.057508 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-5h9n9"] Nov 28 18:04:53 crc kubenswrapper[4909]: I1128 18:04:53.066436 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-5h9n9"] Nov 28 18:04:53 crc kubenswrapper[4909]: I1128 18:04:53.915209 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5db555a-5000-4331-b1b1-6108f0a9fd17" path="/var/lib/kubelet/pods/e5db555a-5000-4331-b1b1-6108f0a9fd17/volumes" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.321476 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:04:59 crc kubenswrapper[4909]: E1128 18:04:59.322523 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="187d1121-6730-42b2-8233-4f050d18f92b" containerName="keystone-cron" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.322539 4909 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="187d1121-6730-42b2-8233-4f050d18f92b" containerName="keystone-cron" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.322906 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="187d1121-6730-42b2-8233-4f050d18f92b" containerName="keystone-cron" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.325145 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.353470 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.386010 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.386486 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.386557 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqtt5\" (UniqueName: \"kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.488883 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.489002 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.489073 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqtt5\" (UniqueName: \"kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.489531 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.489588 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.518943 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqtt5\" (UniqueName: \"kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5\") pod \"certified-operators-9hxnc\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:04:59 crc kubenswrapper[4909]: I1128 18:04:59.664769 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:00 crc kubenswrapper[4909]: I1128 18:05:00.163035 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:05:00 crc kubenswrapper[4909]: I1128 18:05:00.349127 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerStarted","Data":"87fc4a4d5c5d83cce08933fb4bfa1e86f1c13413033d95a16694b4050af87732"} Nov 28 18:05:01 crc kubenswrapper[4909]: I1128 18:05:01.364318 4909 generic.go:334] "Generic (PLEG): container finished" podID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerID="81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba" exitCode=0 Nov 28 18:05:01 crc kubenswrapper[4909]: I1128 18:05:01.364454 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerDied","Data":"81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba"} Nov 28 18:05:01 crc kubenswrapper[4909]: I1128 18:05:01.368207 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:05:02 crc kubenswrapper[4909]: I1128 18:05:02.901469 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:05:02 crc kubenswrapper[4909]: E1128 18:05:02.902114 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:05:04 crc kubenswrapper[4909]: I1128 18:05:04.400645 4909 generic.go:334] "Generic (PLEG): container finished" podID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerID="329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760" exitCode=0 Nov 28 18:05:04 crc kubenswrapper[4909]: I1128 18:05:04.400783 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerDied","Data":"329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760"} Nov 28 18:05:06 crc kubenswrapper[4909]: I1128 18:05:06.430881 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" 
event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerStarted","Data":"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89"} Nov 28 18:05:06 crc kubenswrapper[4909]: I1128 18:05:06.457916 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9hxnc" podStartSLOduration=3.635349707 podStartE2EDuration="7.457898285s" podCreationTimestamp="2025-11-28 18:04:59 +0000 UTC" firstStartedPulling="2025-11-28 18:05:01.367913234 +0000 UTC m=+6883.764597758" lastFinishedPulling="2025-11-28 18:05:05.190461812 +0000 UTC m=+6887.587146336" observedRunningTime="2025-11-28 18:05:06.452414688 +0000 UTC m=+6888.849099212" watchObservedRunningTime="2025-11-28 18:05:06.457898285 +0000 UTC m=+6888.854582809" Nov 28 18:05:09 crc kubenswrapper[4909]: I1128 18:05:09.665135 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:09 crc kubenswrapper[4909]: I1128 18:05:09.665477 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:09 crc kubenswrapper[4909]: I1128 18:05:09.728532 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:10 crc kubenswrapper[4909]: I1128 18:05:10.568980 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:10 crc kubenswrapper[4909]: I1128 18:05:10.657274 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:05:12 crc kubenswrapper[4909]: I1128 18:05:12.507180 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9hxnc" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="registry-server" containerID="cri-o://7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89" gracePeriod=2 Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.272895 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.351774 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqtt5\" (UniqueName: \"kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5\") pod \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.352177 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities\") pod \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.352564 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content\") pod \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\" (UID: \"29cbbf6b-1da5-445b-a173-f9aee2f45e55\") " Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.353047 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities" (OuterVolumeSpecName: "utilities") pod "29cbbf6b-1da5-445b-a173-f9aee2f45e55" (UID: "29cbbf6b-1da5-445b-a173-f9aee2f45e55"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.353338 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.358802 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5" (OuterVolumeSpecName: "kube-api-access-kqtt5") pod "29cbbf6b-1da5-445b-a173-f9aee2f45e55" (UID: "29cbbf6b-1da5-445b-a173-f9aee2f45e55"). InnerVolumeSpecName "kube-api-access-kqtt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.402809 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29cbbf6b-1da5-445b-a173-f9aee2f45e55" (UID: "29cbbf6b-1da5-445b-a173-f9aee2f45e55"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.455230 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqtt5\" (UniqueName: \"kubernetes.io/projected/29cbbf6b-1da5-445b-a173-f9aee2f45e55-kube-api-access-kqtt5\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.455267 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbbf6b-1da5-445b-a173-f9aee2f45e55-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.522171 4909 generic.go:334] "Generic (PLEG): container finished" podID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerID="7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89" exitCode=0 Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.522218 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerDied","Data":"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89"} Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.522285 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9hxnc" event={"ID":"29cbbf6b-1da5-445b-a173-f9aee2f45e55","Type":"ContainerDied","Data":"87fc4a4d5c5d83cce08933fb4bfa1e86f1c13413033d95a16694b4050af87732"} Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.522285 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9hxnc" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.522309 4909 scope.go:117] "RemoveContainer" containerID="7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.555684 4909 scope.go:117] "RemoveContainer" containerID="329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.575548 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.587546 4909 scope.go:117] "RemoveContainer" containerID="81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.588348 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9hxnc"] Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.624031 4909 scope.go:117] "RemoveContainer" containerID="7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89" Nov 28 18:05:13 crc kubenswrapper[4909]: E1128 18:05:13.624542 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89\": container with ID starting with 7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89 not found: ID does not exist" containerID="7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.624589 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89"} err="failed to get container status 
\"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89\": rpc error: code = NotFound desc = could not find container \"7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89\": container with ID starting with 7e2363b3468dbc8818ff7bb625943d7fec551d1f609cee51956cea3066322f89 not found: ID does not exist" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.624627 4909 scope.go:117] "RemoveContainer" containerID="329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760" Nov 28 18:05:13 crc kubenswrapper[4909]: E1128 18:05:13.624986 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760\": container with ID starting with 329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760 not found: ID does not exist" containerID="329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.625021 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760"} err="failed to get container status \"329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760\": rpc error: code = NotFound desc = could not find container \"329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760\": container with ID starting with 329cd82957f307849178b279835a466c9307efa8cea3fe1742b0e03eef4e7760 not found: ID does not exist" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.625042 4909 scope.go:117] "RemoveContainer" containerID="81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba" Nov 28 18:05:13 crc kubenswrapper[4909]: E1128 18:05:13.625287 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba\": container with ID starting with 81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba not found: ID does not exist" containerID="81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.625313 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba"} err="failed to get container status \"81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba\": rpc error: code = NotFound desc = could not find container \"81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba\": container with ID starting with 81f175b24e26bc51e3be2e608893f64f6f89c78c4a1225794fccd7c95723fcba not found: ID does not exist" Nov 28 18:05:13 crc kubenswrapper[4909]: I1128 18:05:13.922267 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" path="/var/lib/kubelet/pods/29cbbf6b-1da5-445b-a173-f9aee2f45e55/volumes" Nov 28 18:05:17 crc kubenswrapper[4909]: I1128 18:05:17.921711 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:05:17 crc kubenswrapper[4909]: E1128 18:05:17.930562 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:05:32 crc kubenswrapper[4909]: I1128 18:05:32.903024 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:05:32 crc kubenswrapper[4909]: E1128 18:05:32.904273 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:05:37 crc kubenswrapper[4909]: I1128 18:05:37.648757 4909 scope.go:117] "RemoveContainer" containerID="6e77d54878a042e57b0f228ce8305299105e02f2a26a46fc0ca98007e54ec711" Nov 28 18:05:37 crc kubenswrapper[4909]: I1128 18:05:37.676454 4909 scope.go:117] "RemoveContainer" containerID="850aced73610555014cc55b6ac60b0032d3129421dde98b1f2629cda4fad678a" Nov 28 18:05:37 crc kubenswrapper[4909]: I1128 18:05:37.726757 4909 scope.go:117] "RemoveContainer" containerID="a83ec6ba03113cf81ed6473cad414d38d7646059004ee1d30c351dbc8477d9f1" Nov 28 18:05:43 crc kubenswrapper[4909]: I1128 18:05:43.902983 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:05:43 crc kubenswrapper[4909]: E1128 18:05:43.906352 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:05:56 crc kubenswrapper[4909]: I1128 18:05:56.901611 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:05:56 crc kubenswrapper[4909]: E1128 18:05:56.902331 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:06:10 crc kubenswrapper[4909]: I1128 18:06:10.902283 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:06:10 crc kubenswrapper[4909]: E1128 18:06:10.903818 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:06:24 crc kubenswrapper[4909]: I1128 18:06:24.901840 4909 scope.go:117] "RemoveContainer" 
containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:06:24 crc kubenswrapper[4909]: E1128 18:06:24.902569 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:06:36 crc kubenswrapper[4909]: I1128 18:06:36.901866 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:06:36 crc kubenswrapper[4909]: E1128 18:06:36.902536 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:06:48 crc kubenswrapper[4909]: I1128 18:06:48.902020 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:06:48 crc kubenswrapper[4909]: E1128 18:06:48.903027 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:07:01 crc kubenswrapper[4909]: I1128 18:07:01.902187 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:07:01 crc kubenswrapper[4909]: E1128 18:07:01.903246 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:07:15 crc kubenswrapper[4909]: I1128 18:07:15.903619 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:07:15 crc kubenswrapper[4909]: E1128 18:07:15.904626 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:07:28 crc kubenswrapper[4909]: I1128 18:07:28.901329 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.036410 4909 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa"} Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.973155 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:30 crc kubenswrapper[4909]: E1128 18:07:30.974005 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="extract-content" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.974029 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="extract-content" Nov 28 18:07:30 crc kubenswrapper[4909]: E1128 18:07:30.974057 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="extract-utilities" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.974066 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="extract-utilities" Nov 28 18:07:30 crc kubenswrapper[4909]: E1128 18:07:30.974093 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="registry-server" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.974100 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="registry-server" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.974362 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="29cbbf6b-1da5-445b-a173-f9aee2f45e55" containerName="registry-server" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.976227 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:30 crc kubenswrapper[4909]: I1128 18:07:30.992972 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.154190 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.154266 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72mkt\" (UniqueName: \"kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.154495 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.257004 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.257067 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72mkt\" (UniqueName: \"kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.257113 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.257595 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.257610 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.280636 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-72mkt\" (UniqueName: \"kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt\") pod \"community-operators-54hw7\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.329365 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:31 crc kubenswrapper[4909]: I1128 18:07:31.869045 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:32 crc kubenswrapper[4909]: I1128 18:07:32.078577 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerStarted","Data":"41785373073fbfc99edd42c270208e7fe0c9a263004788ad9ea62ca20e12d6bb"} Nov 28 18:07:33 crc kubenswrapper[4909]: I1128 18:07:33.092137 4909 generic.go:334] "Generic (PLEG): container finished" podID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerID="a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e" exitCode=0 Nov 28 18:07:33 crc kubenswrapper[4909]: I1128 18:07:33.092397 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerDied","Data":"a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e"} Nov 28 18:07:35 crc kubenswrapper[4909]: I1128 18:07:35.116599 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerStarted","Data":"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285"} Nov 28 18:07:36 crc kubenswrapper[4909]: I1128 18:07:36.135581 4909 generic.go:334] "Generic (PLEG): container finished" podID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerID="b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285" exitCode=0 Nov 28 18:07:36 crc kubenswrapper[4909]: I1128 18:07:36.135686 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerDied","Data":"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285"} Nov 28 18:07:38 crc kubenswrapper[4909]: I1128 18:07:38.154737 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerStarted","Data":"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c"} Nov 28 18:07:38 crc kubenswrapper[4909]: I1128 18:07:38.176341 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-54hw7" podStartSLOduration=4.345328267 podStartE2EDuration="8.176323872s" podCreationTimestamp="2025-11-28 18:07:30 +0000 UTC" firstStartedPulling="2025-11-28 18:07:33.097794619 +0000 UTC m=+7035.494479143" lastFinishedPulling="2025-11-28 18:07:36.928790224 +0000 UTC m=+7039.325474748" observedRunningTime="2025-11-28 18:07:38.171210485 +0000 UTC m=+7040.567895009" watchObservedRunningTime="2025-11-28 18:07:38.176323872 +0000 UTC m=+7040.573008396" Nov 28 18:07:41 crc kubenswrapper[4909]: I1128 18:07:41.329519 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:41 crc kubenswrapper[4909]: I1128 18:07:41.330110 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:41 crc kubenswrapper[4909]: I1128 18:07:41.388493 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:42 crc kubenswrapper[4909]: I1128 18:07:42.260085 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:42 crc kubenswrapper[4909]: I1128 18:07:42.310362 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.228140 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-54hw7" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="registry-server" containerID="cri-o://27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c" gracePeriod=2 Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.737611 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.844090 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72mkt\" (UniqueName: \"kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt\") pod \"09b16ade-d5c6-42ba-9036-152ff3287ff3\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.844390 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities\") pod \"09b16ade-d5c6-42ba-9036-152ff3287ff3\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.844558 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content\") pod \"09b16ade-d5c6-42ba-9036-152ff3287ff3\" (UID: \"09b16ade-d5c6-42ba-9036-152ff3287ff3\") " Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.845268 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities" (OuterVolumeSpecName: "utilities") pod "09b16ade-d5c6-42ba-9036-152ff3287ff3" (UID: "09b16ade-d5c6-42ba-9036-152ff3287ff3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.850980 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt" (OuterVolumeSpecName: "kube-api-access-72mkt") pod "09b16ade-d5c6-42ba-9036-152ff3287ff3" (UID: "09b16ade-d5c6-42ba-9036-152ff3287ff3"). InnerVolumeSpecName "kube-api-access-72mkt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.947204 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72mkt\" (UniqueName: \"kubernetes.io/projected/09b16ade-d5c6-42ba-9036-152ff3287ff3-kube-api-access-72mkt\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:44 crc kubenswrapper[4909]: I1128 18:07:44.947240 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.038964 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09b16ade-d5c6-42ba-9036-152ff3287ff3" (UID: "09b16ade-d5c6-42ba-9036-152ff3287ff3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.050584 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b16ade-d5c6-42ba-9036-152ff3287ff3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.258121 4909 generic.go:334] "Generic (PLEG): container finished" podID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerID="27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c" exitCode=0 Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.258478 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerDied","Data":"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c"} Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.258507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54hw7" event={"ID":"09b16ade-d5c6-42ba-9036-152ff3287ff3","Type":"ContainerDied","Data":"41785373073fbfc99edd42c270208e7fe0c9a263004788ad9ea62ca20e12d6bb"} Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.258523 4909 scope.go:117] "RemoveContainer" containerID="27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.258636 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-54hw7" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.326959 4909 scope.go:117] "RemoveContainer" containerID="b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.327216 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.343895 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-54hw7"] Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.395555 4909 scope.go:117] "RemoveContainer" containerID="a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.444056 4909 scope.go:117] "RemoveContainer" containerID="27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c" Nov 28 18:07:45 crc kubenswrapper[4909]: E1128 18:07:45.444466 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c\": container with ID starting with 27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c not found: ID does not exist" containerID="27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.444569 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c"} err="failed to get container status \"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c\": rpc error: code = NotFound desc = could not find container \"27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c\": container with ID starting with 27a19661069b40f27f9fd6fe4842e67a4a39e1d9688d02b8b07243fed3e1a22c not found: ID does not exist" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.444647 4909 scope.go:117] "RemoveContainer" containerID="b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285" Nov 28 18:07:45 crc kubenswrapper[4909]: E1128 18:07:45.444972 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285\": container with ID starting with b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285 not found: ID does not exist" containerID="b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.445004 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285"} err="failed to get container status \"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285\": rpc error: code = NotFound desc = could not find container \"b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285\": container with ID starting with b5cdddbef039f8fef24050b54d0fa47c7ad89da8747a93da3cefc172de95b285 not found: ID does not exist" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.445072 4909 scope.go:117] "RemoveContainer" containerID="a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e" Nov 28 18:07:45 crc kubenswrapper[4909]: E1128 18:07:45.445239 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e\": container with ID starting with a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e not found: ID does not exist" containerID="a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.445258 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e"} err="failed to get container status \"a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e\": rpc error: code = NotFound desc = could not find container \"a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e\": container with ID starting with a4291176e0d6b717be7b5e7f5dcce4eb6f0c37b30f21b3385a66e851e1f2dd1e not found: ID does not exist" Nov 28 18:07:45 crc kubenswrapper[4909]: I1128 18:07:45.923053 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" path="/var/lib/kubelet/pods/09b16ade-d5c6-42ba-9036-152ff3287ff3/volumes" Nov 28 18:07:52 crc kubenswrapper[4909]: I1128 18:07:52.337887 4909 generic.go:334] "Generic (PLEG): container finished" podID="ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" containerID="d8dac42ad92690806ae1f9760a7601f7b395fb7b18f0aae62af5819bd276aaf5" exitCode=0 Nov 28 18:07:52 crc kubenswrapper[4909]: I1128 18:07:52.338097 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" event={"ID":"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54","Type":"ContainerDied","Data":"d8dac42ad92690806ae1f9760a7601f7b395fb7b18f0aae62af5819bd276aaf5"} Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.852283 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.950990 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key\") pod \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.951122 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph\") pod \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.951216 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory\") pod \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.951335 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx7j4\" (UniqueName: \"kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4\") pod \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.951418 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle\") pod \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\" (UID: \"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54\") " Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.957310 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph" (OuterVolumeSpecName: "ceph") pod "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" (UID: "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.958947 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" (UID: "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.962060 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4" (OuterVolumeSpecName: "kube-api-access-xx7j4") pod "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" (UID: "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54"). InnerVolumeSpecName "kube-api-access-xx7j4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:07:53 crc kubenswrapper[4909]: I1128 18:07:53.995974 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory" (OuterVolumeSpecName: "inventory") pod "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" (UID: "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.015569 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" (UID: "ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.054235 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.054260 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.054270 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.054279 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx7j4\" (UniqueName: \"kubernetes.io/projected/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-kube-api-access-xx7j4\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.054288 4909 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.363107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" event={"ID":"ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54","Type":"ContainerDied","Data":"382fbb89f1c3eabb1c8afebc3a8baf3038676fe23ed334c6d0f71f78df45c77f"} Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.363621 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="382fbb89f1c3eabb1c8afebc3a8baf3038676fe23ed334c6d0f71f78df45c77f" Nov 28 18:07:54 crc kubenswrapper[4909]: I1128 18:07:54.363239 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.715139 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-xp7wk"] Nov 28 18:08:00 crc kubenswrapper[4909]: E1128 18:08:00.716215 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="extract-utilities" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716233 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="extract-utilities" Nov 28 18:08:00 crc kubenswrapper[4909]: E1128 18:08:00.716259 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="registry-server" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716268 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="registry-server" Nov 28 18:08:00 crc kubenswrapper[4909]: E1128 18:08:00.716285 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="extract-content" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716293 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="extract-content" Nov 28 18:08:00 crc kubenswrapper[4909]: E1128 18:08:00.716323 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716332 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716582 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b16ade-d5c6-42ba-9036-152ff3287ff3" containerName="registry-server" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.716620 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.717577 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.720383 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.720401 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.720391 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.724051 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.743935 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-xp7wk"] Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.814909 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.815069 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.815506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.815586 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vxqg\" (UniqueName: \"kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.815667 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.918788 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: 
I1128 18:08:00.918917 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.919162 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.919225 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vxqg\" (UniqueName: \"kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.919290 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.927584 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.927898 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.928204 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.932818 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:00 crc kubenswrapper[4909]: I1128 18:08:00.937902 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vxqg\" (UniqueName: \"kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg\") pod \"bootstrap-openstack-openstack-cell1-xp7wk\" (UID: 
\"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:01 crc kubenswrapper[4909]: I1128 18:08:01.058284 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:08:01 crc kubenswrapper[4909]: I1128 18:08:01.591511 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-xp7wk"] Nov 28 18:08:02 crc kubenswrapper[4909]: I1128 18:08:02.460568 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" event={"ID":"67e07eb7-8bd1-448a-898a-7ce44180ceaf","Type":"ContainerStarted","Data":"38852f744e77bf4ac1d44a8ed11e6143a8d578fabdc92eddaeff79727b3b6a19"} Nov 28 18:08:02 crc kubenswrapper[4909]: I1128 18:08:02.461052 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" event={"ID":"67e07eb7-8bd1-448a-898a-7ce44180ceaf","Type":"ContainerStarted","Data":"f3d6436f8373cdce6287101f0560f830027e0e259aeb19c58e0dfc072f963a31"} Nov 28 18:08:02 crc kubenswrapper[4909]: I1128 18:08:02.481541 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" podStartSLOduration=1.922783031 podStartE2EDuration="2.481520485s" podCreationTimestamp="2025-11-28 18:08:00 +0000 UTC" firstStartedPulling="2025-11-28 18:08:01.595911983 +0000 UTC m=+7063.992596507" lastFinishedPulling="2025-11-28 18:08:02.154649437 +0000 UTC m=+7064.551333961" observedRunningTime="2025-11-28 18:08:02.474987489 +0000 UTC m=+7064.871672043" watchObservedRunningTime="2025-11-28 18:08:02.481520485 +0000 UTC m=+7064.878205009" Nov 28 18:09:37 crc kubenswrapper[4909]: I1128 18:09:37.860118 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:37 crc kubenswrapper[4909]: I1128 18:09:37.864944 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:37 crc kubenswrapper[4909]: I1128 18:09:37.877819 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.002017 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.002156 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.002198 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kjwj\" (UniqueName: \"kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.104495 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.104693 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.104744 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kjwj\" (UniqueName: \"kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.105259 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.105602 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.138582 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4kjwj\" (UniqueName: \"kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj\") pod \"redhat-marketplace-jndkn\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.200868 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:38 crc kubenswrapper[4909]: I1128 18:09:38.767693 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:39 crc kubenswrapper[4909]: I1128 18:09:39.545946 4909 generic.go:334] "Generic (PLEG): container finished" podID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerID="010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83" exitCode=0 Nov 28 18:09:39 crc kubenswrapper[4909]: I1128 18:09:39.546030 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerDied","Data":"010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83"} Nov 28 18:09:39 crc kubenswrapper[4909]: I1128 18:09:39.546231 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerStarted","Data":"c019a7903972fcaac03082840f013634e04be41cd529df84fc3ed0da970f230b"} Nov 28 18:09:41 crc kubenswrapper[4909]: I1128 18:09:41.568809 4909 generic.go:334] "Generic (PLEG): container finished" podID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerID="a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8" exitCode=0 Nov 28 18:09:41 crc kubenswrapper[4909]: I1128 18:09:41.569090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerDied","Data":"a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8"} Nov 28 18:09:42 crc kubenswrapper[4909]: I1128 18:09:42.587919 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerStarted","Data":"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e"} Nov 28 18:09:42 crc kubenswrapper[4909]: I1128 18:09:42.614765 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jndkn" podStartSLOduration=3.071296571 podStartE2EDuration="5.614743572s" podCreationTimestamp="2025-11-28 18:09:37 +0000 UTC" firstStartedPulling="2025-11-28 18:09:39.547871395 +0000 UTC m=+7161.944555959" lastFinishedPulling="2025-11-28 18:09:42.091318436 +0000 UTC m=+7164.488002960" observedRunningTime="2025-11-28 18:09:42.608510734 +0000 UTC m=+7165.005195268" watchObservedRunningTime="2025-11-28 18:09:42.614743572 +0000 UTC m=+7165.011428096" Nov 28 18:09:48 crc kubenswrapper[4909]: I1128 18:09:48.201178 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:48 crc kubenswrapper[4909]: I1128 18:09:48.201854 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:48 crc kubenswrapper[4909]: I1128 18:09:48.261030 4909 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:48 crc kubenswrapper[4909]: I1128 18:09:48.688263 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:48 crc kubenswrapper[4909]: I1128 18:09:48.755005 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:49 crc kubenswrapper[4909]: I1128 18:09:49.910771 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:09:49 crc kubenswrapper[4909]: I1128 18:09:49.911162 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:09:50 crc kubenswrapper[4909]: I1128 18:09:50.671722 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jndkn" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="registry-server" containerID="cri-o://ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e" gracePeriod=2 Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.200293 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.291139 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities\") pod \"0176c88f-f265-41c3-94c3-b1a52cc2a443\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.291466 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content\") pod \"0176c88f-f265-41c3-94c3-b1a52cc2a443\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.291553 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kjwj\" (UniqueName: \"kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj\") pod \"0176c88f-f265-41c3-94c3-b1a52cc2a443\" (UID: \"0176c88f-f265-41c3-94c3-b1a52cc2a443\") " Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.292204 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities" (OuterVolumeSpecName: "utilities") pod "0176c88f-f265-41c3-94c3-b1a52cc2a443" (UID: "0176c88f-f265-41c3-94c3-b1a52cc2a443"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.298030 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj" (OuterVolumeSpecName: "kube-api-access-4kjwj") pod "0176c88f-f265-41c3-94c3-b1a52cc2a443" (UID: "0176c88f-f265-41c3-94c3-b1a52cc2a443"). InnerVolumeSpecName "kube-api-access-4kjwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.310483 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0176c88f-f265-41c3-94c3-b1a52cc2a443" (UID: "0176c88f-f265-41c3-94c3-b1a52cc2a443"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.393406 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.393438 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0176c88f-f265-41c3-94c3-b1a52cc2a443-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.393449 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kjwj\" (UniqueName: \"kubernetes.io/projected/0176c88f-f265-41c3-94c3-b1a52cc2a443-kube-api-access-4kjwj\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.686306 4909 generic.go:334] "Generic (PLEG): container finished" podID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerID="ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e" exitCode=0 Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.686382 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerDied","Data":"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e"} Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.686417 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jndkn" event={"ID":"0176c88f-f265-41c3-94c3-b1a52cc2a443","Type":"ContainerDied","Data":"c019a7903972fcaac03082840f013634e04be41cd529df84fc3ed0da970f230b"} Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.686439 4909 scope.go:117] "RemoveContainer" containerID="ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.686600 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jndkn" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.712611 4909 scope.go:117] "RemoveContainer" containerID="a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.730121 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.738977 4909 scope.go:117] "RemoveContainer" containerID="010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.739872 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jndkn"] Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.812945 4909 scope.go:117] "RemoveContainer" containerID="ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e" Nov 28 18:09:51 crc kubenswrapper[4909]: E1128 18:09:51.813405 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e\": container with ID starting with ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e not found: ID does not exist" containerID="ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.813530 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e"} err="failed to get container status \"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e\": rpc error: code = NotFound desc = could not find container \"ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e\": container with ID starting with ee3b2a67cf679a1f54c79475d6eddcf96e058de2feef57a3be56d681668df24e not found: ID does not exist" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.813651 4909 scope.go:117] "RemoveContainer" containerID="a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8" Nov 28 18:09:51 crc kubenswrapper[4909]: E1128 18:09:51.814088 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8\": container with ID starting with a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8 not found: ID does not exist" containerID="a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.814208 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8"} err="failed to get container status \"a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8\": rpc error: code = NotFound desc = could not find container \"a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8\": container with ID starting with a0881a54c773f3e2f65a73f159441307d71cf8c7245a4313700ea2a1d09ec2d8 not found: ID does not exist" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.814306 4909 scope.go:117] "RemoveContainer" containerID="010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83" Nov 28 18:09:51 crc kubenswrapper[4909]: E1128 18:09:51.814868 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83\": container with ID starting with 010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83 not found: ID does not exist" containerID="010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.814921 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83"} err="failed to get container status \"010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83\": rpc error: code = NotFound desc = could not find container \"010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83\": container with ID starting with 010b8d0611376c92d8fa9dc7c92b0c4a438e5469136e08928f5d8f5abdb08c83 not found: ID does not exist" Nov 28 18:09:51 crc kubenswrapper[4909]: I1128 18:09:51.916373 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" path="/var/lib/kubelet/pods/0176c88f-f265-41c3-94c3-b1a52cc2a443/volumes" Nov 28 18:10:19 crc kubenswrapper[4909]: I1128 18:10:19.910896 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:10:19 crc kubenswrapper[4909]: I1128 18:10:19.911769 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.687007 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"] Nov 28 18:10:26 crc kubenswrapper[4909]: E1128 18:10:26.688494 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="extract-utilities" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.688527 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="extract-utilities" Nov 28 18:10:26 crc kubenswrapper[4909]: E1128 18:10:26.688570 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="extract-content" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.688590 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="extract-content" Nov 28 18:10:26 crc kubenswrapper[4909]: E1128 18:10:26.688646 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="registry-server" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.688709 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="registry-server" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.689713 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0176c88f-f265-41c3-94c3-b1a52cc2a443" containerName="registry-server" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 
18:10:26.696110 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.712934 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"] Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.846319 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.846693 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnfrn\" (UniqueName: \"kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.846868 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.948834 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.948939 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnfrn\" (UniqueName: \"kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.949020 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.949453 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.949772 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:26 crc kubenswrapper[4909]: I1128 18:10:26.975879 4909 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnfrn\" (UniqueName: \"kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn\") pod \"redhat-operators-2pbrb\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") " pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:27 crc kubenswrapper[4909]: I1128 18:10:27.039433 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:27 crc kubenswrapper[4909]: I1128 18:10:27.598668 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"] Nov 28 18:10:28 crc kubenswrapper[4909]: I1128 18:10:28.106526 4909 generic.go:334] "Generic (PLEG): container finished" podID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerID="7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad" exitCode=0 Nov 28 18:10:28 crc kubenswrapper[4909]: I1128 18:10:28.106573 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerDied","Data":"7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad"} Nov 28 18:10:28 crc kubenswrapper[4909]: I1128 18:10:28.106899 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerStarted","Data":"56311db8b52f25d01db2a369a3af66d8011e0d74471fba28af2fa32ec43e086c"} Nov 28 18:10:28 crc kubenswrapper[4909]: I1128 18:10:28.108798 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:10:29 crc kubenswrapper[4909]: I1128 18:10:29.123177 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerStarted","Data":"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c"} Nov 28 18:10:30 crc kubenswrapper[4909]: I1128 18:10:30.135263 4909 generic.go:334] "Generic (PLEG): container finished" podID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerID="8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c" exitCode=0 Nov 28 18:10:30 crc kubenswrapper[4909]: I1128 18:10:30.135352 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerDied","Data":"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c"} Nov 28 18:10:31 crc kubenswrapper[4909]: I1128 18:10:31.150143 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerStarted","Data":"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced"} Nov 28 18:10:31 crc kubenswrapper[4909]: I1128 18:10:31.187495 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2pbrb" podStartSLOduration=2.6493894129999997 podStartE2EDuration="5.18747435s" podCreationTimestamp="2025-11-28 18:10:26 +0000 UTC" firstStartedPulling="2025-11-28 18:10:28.108402145 +0000 UTC m=+7210.505086669" lastFinishedPulling="2025-11-28 18:10:30.646487072 +0000 UTC m=+7213.043171606" observedRunningTime="2025-11-28 18:10:31.173182236 +0000 UTC m=+7213.569866790" watchObservedRunningTime="2025-11-28 
Nov 28 18:10:37 crc kubenswrapper[4909]: I1128 18:10:37.039631 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2pbrb"
Nov 28 18:10:37 crc kubenswrapper[4909]: I1128 18:10:37.040397 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2pbrb"
Nov 28 18:10:37 crc kubenswrapper[4909]: I1128 18:10:37.109345 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2pbrb"
Nov 28 18:10:37 crc kubenswrapper[4909]: I1128 18:10:37.269704 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2pbrb"
Nov 28 18:10:37 crc kubenswrapper[4909]: I1128 18:10:37.359708 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"]
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.240259 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2pbrb" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="registry-server" containerID="cri-o://35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced" gracePeriod=2
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.747806 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pbrb"
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.860106 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities\") pod \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") "
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.860184 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnfrn\" (UniqueName: \"kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn\") pod \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") "
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.860299 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content\") pod \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\" (UID: \"960dcc6f-14e5-4dfa-abb5-350719ab59a0\") "
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.861400 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities" (OuterVolumeSpecName: "utilities") pod "960dcc6f-14e5-4dfa-abb5-350719ab59a0" (UID: "960dcc6f-14e5-4dfa-abb5-350719ab59a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.867041 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn" (OuterVolumeSpecName: "kube-api-access-gnfrn") pod "960dcc6f-14e5-4dfa-abb5-350719ab59a0" (UID: "960dcc6f-14e5-4dfa-abb5-350719ab59a0"). InnerVolumeSpecName "kube-api-access-gnfrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.963129 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.963176 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnfrn\" (UniqueName: \"kubernetes.io/projected/960dcc6f-14e5-4dfa-abb5-350719ab59a0-kube-api-access-gnfrn\") on node \"crc\" DevicePath \"\""
Nov 28 18:10:39 crc kubenswrapper[4909]: I1128 18:10:39.978367 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "960dcc6f-14e5-4dfa-abb5-350719ab59a0" (UID: "960dcc6f-14e5-4dfa-abb5-350719ab59a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.065797 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/960dcc6f-14e5-4dfa-abb5-350719ab59a0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.254142 4909 generic.go:334] "Generic (PLEG): container finished" podID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerID="35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced" exitCode=0
Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.254193 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerDied","Data":"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced"}
Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.254232 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pbrb"
Need to start a new one" pod="openshift-marketplace/redhat-operators-2pbrb" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.254253 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pbrb" event={"ID":"960dcc6f-14e5-4dfa-abb5-350719ab59a0","Type":"ContainerDied","Data":"56311db8b52f25d01db2a369a3af66d8011e0d74471fba28af2fa32ec43e086c"} Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.254272 4909 scope.go:117] "RemoveContainer" containerID="35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.277158 4909 scope.go:117] "RemoveContainer" containerID="8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.330132 4909 scope.go:117] "RemoveContainer" containerID="7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.361319 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"] Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.370000 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2pbrb"] Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.395152 4909 scope.go:117] "RemoveContainer" containerID="35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced" Nov 28 18:10:40 crc kubenswrapper[4909]: E1128 18:10:40.395699 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced\": container with ID starting with 35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced not found: ID does not exist" containerID="35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.395760 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced"} err="failed to get container status \"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced\": rpc error: code = NotFound desc = could not find container \"35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced\": container with ID starting with 35088d2fa6d9e6246ec2932b8afa7cb192c7dc3cba4dff68e986ad90f0c28ced not found: ID does not exist" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.395787 4909 scope.go:117] "RemoveContainer" containerID="8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c" Nov 28 18:10:40 crc kubenswrapper[4909]: E1128 18:10:40.396159 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c\": container with ID starting with 8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c not found: ID does not exist" containerID="8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.396249 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c"} err="failed to get container status \"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c\": rpc error: code = NotFound desc = could not find container 
\"8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c\": container with ID starting with 8cd09f40c3d43cc4c2055d976af052209556f17ba18d64be84b7c3f87b55fd3c not found: ID does not exist" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.396322 4909 scope.go:117] "RemoveContainer" containerID="7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad" Nov 28 18:10:40 crc kubenswrapper[4909]: E1128 18:10:40.396754 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad\": container with ID starting with 7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad not found: ID does not exist" containerID="7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad" Nov 28 18:10:40 crc kubenswrapper[4909]: I1128 18:10:40.396795 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad"} err="failed to get container status \"7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad\": rpc error: code = NotFound desc = could not find container \"7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad\": container with ID starting with 7dffe547f8ba218b7d38925d1985af81a6252b5083751dea90ce9f422f9808ad not found: ID does not exist" Nov 28 18:10:41 crc kubenswrapper[4909]: I1128 18:10:41.916754 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" path="/var/lib/kubelet/pods/960dcc6f-14e5-4dfa-abb5-350719ab59a0/volumes" Nov 28 18:10:49 crc kubenswrapper[4909]: I1128 18:10:49.910557 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:10:49 crc kubenswrapper[4909]: I1128 18:10:49.911212 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:10:49 crc kubenswrapper[4909]: I1128 18:10:49.919008 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:10:49 crc kubenswrapper[4909]: I1128 18:10:49.920228 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:10:49 crc kubenswrapper[4909]: I1128 18:10:49.920347 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa" gracePeriod=600 Nov 28 18:10:50 crc kubenswrapper[4909]: I1128 18:10:50.372351 4909 generic.go:334] 
"Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa" exitCode=0 Nov 28 18:10:50 crc kubenswrapper[4909]: I1128 18:10:50.372385 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa"} Nov 28 18:10:50 crc kubenswrapper[4909]: I1128 18:10:50.372694 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"} Nov 28 18:10:50 crc kubenswrapper[4909]: I1128 18:10:50.372712 4909 scope.go:117] "RemoveContainer" containerID="e411339176f5e7e63fa950117654bd213c078924c12ebf7a42744b40e8eabbd4" Nov 28 18:12:32 crc kubenswrapper[4909]: I1128 18:12:32.695467 4909 generic.go:334] "Generic (PLEG): container finished" podID="67e07eb7-8bd1-448a-898a-7ce44180ceaf" containerID="38852f744e77bf4ac1d44a8ed11e6143a8d578fabdc92eddaeff79727b3b6a19" exitCode=0 Nov 28 18:12:32 crc kubenswrapper[4909]: I1128 18:12:32.695609 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" event={"ID":"67e07eb7-8bd1-448a-898a-7ce44180ceaf","Type":"ContainerDied","Data":"38852f744e77bf4ac1d44a8ed11e6143a8d578fabdc92eddaeff79727b3b6a19"} Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.223345 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.249430 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory\") pod \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.249477 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key\") pod \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.249558 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle\") pod \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.249641 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vxqg\" (UniqueName: \"kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg\") pod \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\" (UID: \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.249865 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph\") pod \"67e07eb7-8bd1-448a-898a-7ce44180ceaf\" (UID: 
\"67e07eb7-8bd1-448a-898a-7ce44180ceaf\") " Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.267191 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph" (OuterVolumeSpecName: "ceph") pod "67e07eb7-8bd1-448a-898a-7ce44180ceaf" (UID: "67e07eb7-8bd1-448a-898a-7ce44180ceaf"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.267215 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg" (OuterVolumeSpecName: "kube-api-access-7vxqg") pod "67e07eb7-8bd1-448a-898a-7ce44180ceaf" (UID: "67e07eb7-8bd1-448a-898a-7ce44180ceaf"). InnerVolumeSpecName "kube-api-access-7vxqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.273922 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "67e07eb7-8bd1-448a-898a-7ce44180ceaf" (UID: "67e07eb7-8bd1-448a-898a-7ce44180ceaf"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.302070 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory" (OuterVolumeSpecName: "inventory") pod "67e07eb7-8bd1-448a-898a-7ce44180ceaf" (UID: "67e07eb7-8bd1-448a-898a-7ce44180ceaf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.321102 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "67e07eb7-8bd1-448a-898a-7ce44180ceaf" (UID: "67e07eb7-8bd1-448a-898a-7ce44180ceaf"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.352321 4909 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.352353 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vxqg\" (UniqueName: \"kubernetes.io/projected/67e07eb7-8bd1-448a-898a-7ce44180ceaf-kube-api-access-7vxqg\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.352363 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.352374 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.352383 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67e07eb7-8bd1-448a-898a-7ce44180ceaf-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.726259 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" event={"ID":"67e07eb7-8bd1-448a-898a-7ce44180ceaf","Type":"ContainerDied","Data":"f3d6436f8373cdce6287101f0560f830027e0e259aeb19c58e0dfc072f963a31"} Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.726533 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3d6436f8373cdce6287101f0560f830027e0e259aeb19c58e0dfc072f963a31" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.726593 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-xp7wk" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.808069 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l84qt"] Nov 28 18:12:34 crc kubenswrapper[4909]: E1128 18:12:34.808518 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="extract-utilities" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.808529 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="extract-utilities" Nov 28 18:12:34 crc kubenswrapper[4909]: E1128 18:12:34.808543 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e07eb7-8bd1-448a-898a-7ce44180ceaf" containerName="bootstrap-openstack-openstack-cell1" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.808548 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e07eb7-8bd1-448a-898a-7ce44180ceaf" containerName="bootstrap-openstack-openstack-cell1" Nov 28 18:12:34 crc kubenswrapper[4909]: E1128 18:12:34.808561 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="registry-server" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.808567 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="registry-server" Nov 28 18:12:34 crc kubenswrapper[4909]: E1128 18:12:34.808580 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="extract-content" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.808586 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="extract-content" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.809601 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="960dcc6f-14e5-4dfa-abb5-350719ab59a0" containerName="registry-server" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.809629 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="67e07eb7-8bd1-448a-898a-7ce44180ceaf" containerName="bootstrap-openstack-openstack-cell1" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.810420 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.815202 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.815269 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.815479 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.815698 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.836506 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l84qt"] Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.862492 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f66p\" (UniqueName: \"kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.862636 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.862913 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.863187 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.964589 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.964709 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc 
kubenswrapper[4909]: I1128 18:12:34.964752 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.964818 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f66p\" (UniqueName: \"kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.969338 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.969838 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.970785 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:34 crc kubenswrapper[4909]: I1128 18:12:34.981147 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f66p\" (UniqueName: \"kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p\") pod \"download-cache-openstack-openstack-cell1-l84qt\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:35 crc kubenswrapper[4909]: I1128 18:12:35.145825 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:12:35 crc kubenswrapper[4909]: I1128 18:12:35.775777 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l84qt"] Nov 28 18:12:36 crc kubenswrapper[4909]: I1128 18:12:36.749522 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" event={"ID":"b75c9750-f182-4b75-9a62-57f3939379c4","Type":"ContainerStarted","Data":"9642a92c76bf147ec20bf1a7694d9f6bb93362398310f31363112f7c19e7e338"} Nov 28 18:12:36 crc kubenswrapper[4909]: I1128 18:12:36.749992 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" event={"ID":"b75c9750-f182-4b75-9a62-57f3939379c4","Type":"ContainerStarted","Data":"53d711324be16c7abccda15eeacd677e9b7888a54982ef6f8dbef6f1fed0d70c"} Nov 28 18:12:36 crc kubenswrapper[4909]: I1128 18:12:36.768400 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" podStartSLOduration=2.3017356749999998 podStartE2EDuration="2.7683823s" podCreationTimestamp="2025-11-28 18:12:34 +0000 UTC" firstStartedPulling="2025-11-28 18:12:35.781090668 +0000 UTC m=+7338.177775232" lastFinishedPulling="2025-11-28 18:12:36.247737333 +0000 UTC m=+7338.644421857" observedRunningTime="2025-11-28 18:12:36.765516582 +0000 UTC m=+7339.162201126" watchObservedRunningTime="2025-11-28 18:12:36.7683823 +0000 UTC m=+7339.165066834" Nov 28 18:13:19 crc kubenswrapper[4909]: I1128 18:13:19.910636 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:13:19 crc kubenswrapper[4909]: I1128 18:13:19.911724 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:13:49 crc kubenswrapper[4909]: I1128 18:13:49.911171 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:13:49 crc kubenswrapper[4909]: I1128 18:13:49.913430 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:14:12 crc kubenswrapper[4909]: I1128 18:14:12.934211 4909 generic.go:334] "Generic (PLEG): container finished" podID="b75c9750-f182-4b75-9a62-57f3939379c4" containerID="9642a92c76bf147ec20bf1a7694d9f6bb93362398310f31363112f7c19e7e338" exitCode=0 Nov 28 18:14:12 crc kubenswrapper[4909]: I1128 18:14:12.934816 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" 
event={"ID":"b75c9750-f182-4b75-9a62-57f3939379c4","Type":"ContainerDied","Data":"9642a92c76bf147ec20bf1a7694d9f6bb93362398310f31363112f7c19e7e338"} Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.451906 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.490751 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f66p\" (UniqueName: \"kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p\") pod \"b75c9750-f182-4b75-9a62-57f3939379c4\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.503021 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p" (OuterVolumeSpecName: "kube-api-access-4f66p") pod "b75c9750-f182-4b75-9a62-57f3939379c4" (UID: "b75c9750-f182-4b75-9a62-57f3939379c4"). InnerVolumeSpecName "kube-api-access-4f66p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.592886 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key\") pod \"b75c9750-f182-4b75-9a62-57f3939379c4\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.593069 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory\") pod \"b75c9750-f182-4b75-9a62-57f3939379c4\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.593156 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph\") pod \"b75c9750-f182-4b75-9a62-57f3939379c4\" (UID: \"b75c9750-f182-4b75-9a62-57f3939379c4\") " Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.594085 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f66p\" (UniqueName: \"kubernetes.io/projected/b75c9750-f182-4b75-9a62-57f3939379c4-kube-api-access-4f66p\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.597106 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph" (OuterVolumeSpecName: "ceph") pod "b75c9750-f182-4b75-9a62-57f3939379c4" (UID: "b75c9750-f182-4b75-9a62-57f3939379c4"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.640818 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory" (OuterVolumeSpecName: "inventory") pod "b75c9750-f182-4b75-9a62-57f3939379c4" (UID: "b75c9750-f182-4b75-9a62-57f3939379c4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.656765 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b75c9750-f182-4b75-9a62-57f3939379c4" (UID: "b75c9750-f182-4b75-9a62-57f3939379c4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.696137 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.696194 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.696209 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b75c9750-f182-4b75-9a62-57f3939379c4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.955839 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" event={"ID":"b75c9750-f182-4b75-9a62-57f3939379c4","Type":"ContainerDied","Data":"53d711324be16c7abccda15eeacd677e9b7888a54982ef6f8dbef6f1fed0d70c"} Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.955886 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53d711324be16c7abccda15eeacd677e9b7888a54982ef6f8dbef6f1fed0d70c" Nov 28 18:14:14 crc kubenswrapper[4909]: I1128 18:14:14.955912 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l84qt" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.073487 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-pclm9"] Nov 28 18:14:15 crc kubenswrapper[4909]: E1128 18:14:15.074040 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b75c9750-f182-4b75-9a62-57f3939379c4" containerName="download-cache-openstack-openstack-cell1" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.074063 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b75c9750-f182-4b75-9a62-57f3939379c4" containerName="download-cache-openstack-openstack-cell1" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.074271 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b75c9750-f182-4b75-9a62-57f3939379c4" containerName="download-cache-openstack-openstack-cell1" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.075043 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.077975 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.078277 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.078883 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.079179 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.091676 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-pclm9"] Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.207009 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.207490 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lggg\" (UniqueName: \"kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.207589 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.207744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.309206 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.309312 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" 
Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.309437 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.309489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lggg\" (UniqueName: \"kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.313972 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.313994 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.313997 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.327104 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lggg\" (UniqueName: \"kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg\") pod \"configure-network-openstack-openstack-cell1-pclm9\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.393637 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:14:15 crc kubenswrapper[4909]: I1128 18:14:15.976067 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-pclm9"] Nov 28 18:14:15 crc kubenswrapper[4909]: W1128 18:14:15.977702 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f9fdddb_81e6_435f_9d8a_cc3e89bea15f.slice/crio-64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c WatchSource:0}: Error finding container 64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c: Status 404 returned error can't find the container with id 64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c Nov 28 18:14:16 crc kubenswrapper[4909]: I1128 18:14:16.974445 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" event={"ID":"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f","Type":"ContainerStarted","Data":"81d6c4eff4563c6284c416465366f3afbd522ad26640d2b1629f079ed4359930"} Nov 28 18:14:16 crc kubenswrapper[4909]: I1128 18:14:16.974866 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" event={"ID":"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f","Type":"ContainerStarted","Data":"64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c"} Nov 28 18:14:17 crc kubenswrapper[4909]: I1128 18:14:17.013040 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" podStartSLOduration=1.497230828 podStartE2EDuration="2.013014735s" podCreationTimestamp="2025-11-28 18:14:15 +0000 UTC" firstStartedPulling="2025-11-28 18:14:15.980801505 +0000 UTC m=+7438.377486029" lastFinishedPulling="2025-11-28 18:14:16.496585392 +0000 UTC m=+7438.893269936" observedRunningTime="2025-11-28 18:14:17.011414232 +0000 UTC m=+7439.408098756" watchObservedRunningTime="2025-11-28 18:14:17.013014735 +0000 UTC m=+7439.409699279" Nov 28 18:14:19 crc kubenswrapper[4909]: I1128 18:14:19.912375 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:14:19 crc kubenswrapper[4909]: I1128 18:14:19.913017 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:14:19 crc kubenswrapper[4909]: I1128 18:14:19.930300 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:14:19 crc kubenswrapper[4909]: I1128 18:14:19.931211 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:14:19 crc 
Nov 28 18:14:19 crc kubenswrapper[4909]: I1128 18:14:19.931269 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" gracePeriod=600
Nov 28 18:14:20 crc kubenswrapper[4909]: E1128 18:14:20.057363 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:14:21 crc kubenswrapper[4909]: I1128 18:14:21.023869 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" exitCode=0
Nov 28 18:14:21 crc kubenswrapper[4909]: I1128 18:14:21.023979 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"}
Nov 28 18:14:21 crc kubenswrapper[4909]: I1128 18:14:21.024178 4909 scope.go:117] "RemoveContainer" containerID="7dde07ca73bc70f8417bb961545f1be0280270788de6df86f67b27140f4beaaa"
Nov 28 18:14:21 crc kubenswrapper[4909]: I1128 18:14:21.025032 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"
Nov 28 18:14:21 crc kubenswrapper[4909]: E1128 18:14:21.025368 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:14:32 crc kubenswrapper[4909]: I1128 18:14:32.902157 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"
Nov 28 18:14:32 crc kubenswrapper[4909]: E1128 18:14:32.903250 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:14:43 crc kubenswrapper[4909]: I1128 18:14:43.902461 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"
Nov 28 18:14:43 crc kubenswrapper[4909]: E1128 18:14:43.903462 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
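[editorial sketch] The repeated "back-off 5m0s restarting failed container" errors show the kubelet's restart backoff at its cap: the delay roughly doubles per crash up to a five-minute maximum, and resets after the container runs cleanly for a while. A sketch of that delay sequence; the 10s initial delay and doubling factor are assumptions, the 5m cap is taken from the log:

// crashloop.go: print an exponential restart-backoff schedule with a cap.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial = 10 * time.Second // assumed initial delay
		max     = 5 * time.Minute  // the "back-off 5m0s" in the log
	)
	delay := initial
	for i := 1; i <= 8; i++ {
		fmt.Printf("restart %d: wait %v\n", i, delay)
		delay *= 2
		if delay > max {
			delay = max // further restarts stay at the cap
		}
	}
}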
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:14:56 crc kubenswrapper[4909]: I1128 18:14:56.902690 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:14:56 crc kubenswrapper[4909]: E1128 18:14:56.904111 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.185088 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf"] Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.187467 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.191078 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.191428 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.194194 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf"] Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.312665 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbwrv\" (UniqueName: \"kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.313225 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.313278 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.415332 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 
crc kubenswrapper[4909]: I1128 18:15:00.415374 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.415457 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbwrv\" (UniqueName: \"kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.418622 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.421817 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.434414 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbwrv\" (UniqueName: \"kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv\") pod \"collect-profiles-29405895-66ndf\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:00 crc kubenswrapper[4909]: I1128 18:15:00.512988 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:01 crc kubenswrapper[4909]: I1128 18:15:01.023427 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf"] Nov 28 18:15:01 crc kubenswrapper[4909]: I1128 18:15:01.511276 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" event={"ID":"953283bf-e833-41ce-83f3-2dc48ae6f291","Type":"ContainerStarted","Data":"fb0404ca80a40babf6731397718cef05613b665937aa9144733c8b25e5baa8b9"} Nov 28 18:15:01 crc kubenswrapper[4909]: I1128 18:15:01.511507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" event={"ID":"953283bf-e833-41ce-83f3-2dc48ae6f291","Type":"ContainerStarted","Data":"44d2a03b9ac121c154a405c9832c6898750e7a5bf208c07f6ce00e735d01f287"} Nov 28 18:15:01 crc kubenswrapper[4909]: I1128 18:15:01.536569 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" podStartSLOduration=1.536552118 podStartE2EDuration="1.536552118s" podCreationTimestamp="2025-11-28 18:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:15:01.529380625 +0000 UTC m=+7483.926065149" watchObservedRunningTime="2025-11-28 18:15:01.536552118 +0000 UTC m=+7483.933236642" Nov 28 18:15:02 crc kubenswrapper[4909]: I1128 18:15:02.521641 4909 generic.go:334] "Generic (PLEG): container finished" podID="953283bf-e833-41ce-83f3-2dc48ae6f291" containerID="fb0404ca80a40babf6731397718cef05613b665937aa9144733c8b25e5baa8b9" exitCode=0 Nov 28 18:15:02 crc kubenswrapper[4909]: I1128 18:15:02.521711 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" event={"ID":"953283bf-e833-41ce-83f3-2dc48ae6f291","Type":"ContainerDied","Data":"fb0404ca80a40babf6731397718cef05613b665937aa9144733c8b25e5baa8b9"} Nov 28 18:15:03 crc kubenswrapper[4909]: I1128 18:15:03.931251 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:03 crc kubenswrapper[4909]: I1128 18:15:03.999455 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume\") pod \"953283bf-e833-41ce-83f3-2dc48ae6f291\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " Nov 28 18:15:03 crc kubenswrapper[4909]: I1128 18:15:03.999724 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume\") pod \"953283bf-e833-41ce-83f3-2dc48ae6f291\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " Nov 28 18:15:03 crc kubenswrapper[4909]: I1128 18:15:03.999906 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbwrv\" (UniqueName: \"kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv\") pod \"953283bf-e833-41ce-83f3-2dc48ae6f291\" (UID: \"953283bf-e833-41ce-83f3-2dc48ae6f291\") " Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.000259 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume" (OuterVolumeSpecName: "config-volume") pod "953283bf-e833-41ce-83f3-2dc48ae6f291" (UID: "953283bf-e833-41ce-83f3-2dc48ae6f291"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.000766 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/953283bf-e833-41ce-83f3-2dc48ae6f291-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.004483 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv" (OuterVolumeSpecName: "kube-api-access-hbwrv") pod "953283bf-e833-41ce-83f3-2dc48ae6f291" (UID: "953283bf-e833-41ce-83f3-2dc48ae6f291"). InnerVolumeSpecName "kube-api-access-hbwrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.004627 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "953283bf-e833-41ce-83f3-2dc48ae6f291" (UID: "953283bf-e833-41ce-83f3-2dc48ae6f291"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.102359 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/953283bf-e833-41ce-83f3-2dc48ae6f291-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.102737 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbwrv\" (UniqueName: \"kubernetes.io/projected/953283bf-e833-41ce-83f3-2dc48ae6f291-kube-api-access-hbwrv\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.547812 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" event={"ID":"953283bf-e833-41ce-83f3-2dc48ae6f291","Type":"ContainerDied","Data":"44d2a03b9ac121c154a405c9832c6898750e7a5bf208c07f6ce00e735d01f287"} Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.548115 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44d2a03b9ac121c154a405c9832c6898750e7a5bf208c07f6ce00e735d01f287" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.547846 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf" Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.617041 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm"] Nov 28 18:15:04 crc kubenswrapper[4909]: I1128 18:15:04.628542 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-8wfpm"] Nov 28 18:15:05 crc kubenswrapper[4909]: I1128 18:15:05.915943 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84b48d81-ba40-4e31-82bd-652556a2eda9" path="/var/lib/kubelet/pods/84b48d81-ba40-4e31-82bd-652556a2eda9/volumes" Nov 28 18:15:07 crc kubenswrapper[4909]: I1128 18:15:07.912439 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:15:07 crc kubenswrapper[4909]: E1128 18:15:07.914274 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:15:20 crc kubenswrapper[4909]: I1128 18:15:20.902822 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:15:20 crc kubenswrapper[4909]: E1128 18:15:20.903697 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:15:33 crc kubenswrapper[4909]: I1128 18:15:33.902161 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:15:33 
crc kubenswrapper[4909]: E1128 18:15:33.903184 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:15:38 crc kubenswrapper[4909]: I1128 18:15:38.151690 4909 scope.go:117] "RemoveContainer" containerID="a1b4b88660b3827533df934fcb4553b0a04397289404dd13451bdbb44e9167e3" Nov 28 18:15:44 crc kubenswrapper[4909]: I1128 18:15:44.002582 4909 generic.go:334] "Generic (PLEG): container finished" podID="1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" containerID="81d6c4eff4563c6284c416465366f3afbd522ad26640d2b1629f079ed4359930" exitCode=0 Nov 28 18:15:44 crc kubenswrapper[4909]: I1128 18:15:44.002681 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" event={"ID":"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f","Type":"ContainerDied","Data":"81d6c4eff4563c6284c416465366f3afbd522ad26640d2b1629f079ed4359930"} Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.553127 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.646486 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph\") pod \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.646704 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key\") pod \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.646799 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lggg\" (UniqueName: \"kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg\") pod \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.646855 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory\") pod \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\" (UID: \"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f\") " Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.652162 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg" (OuterVolumeSpecName: "kube-api-access-8lggg") pod "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" (UID: "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f"). InnerVolumeSpecName "kube-api-access-8lggg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.652625 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph" (OuterVolumeSpecName: "ceph") pod "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" (UID: "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.673914 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" (UID: "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.674934 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory" (OuterVolumeSpecName: "inventory") pod "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" (UID: "1f9fdddb-81e6-435f-9d8a-cc3e89bea15f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.749077 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.749104 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lggg\" (UniqueName: \"kubernetes.io/projected/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-kube-api-access-8lggg\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.749115 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:45 crc kubenswrapper[4909]: I1128 18:15:45.749125 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f9fdddb-81e6-435f-9d8a-cc3e89bea15f-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.034296 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" event={"ID":"1f9fdddb-81e6-435f-9d8a-cc3e89bea15f","Type":"ContainerDied","Data":"64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c"} Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.034347 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64d4b87af5a164371b5ccc90baebee71db9179a66d93605101bb6a777f868b0c" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.034433 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-pclm9" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.147500 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-vlmx8"] Nov 28 18:15:46 crc kubenswrapper[4909]: E1128 18:15:46.148033 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" containerName="configure-network-openstack-openstack-cell1" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.148057 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" containerName="configure-network-openstack-openstack-cell1" Nov 28 18:15:46 crc kubenswrapper[4909]: E1128 18:15:46.148076 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953283bf-e833-41ce-83f3-2dc48ae6f291" containerName="collect-profiles" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.148084 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="953283bf-e833-41ce-83f3-2dc48ae6f291" containerName="collect-profiles" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.148362 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f9fdddb-81e6-435f-9d8a-cc3e89bea15f" containerName="configure-network-openstack-openstack-cell1" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.148394 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="953283bf-e833-41ce-83f3-2dc48ae6f291" containerName="collect-profiles" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.149304 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.151822 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.154607 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.154758 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.155561 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.172999 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-vlmx8"] Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.259060 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.259213 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 
18:15:46.259580 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46c2q\" (UniqueName: \"kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.259826 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.363212 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46c2q\" (UniqueName: \"kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.364592 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.365913 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.366059 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.369244 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.369279 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.369362 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.383304 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46c2q\" (UniqueName: \"kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q\") pod \"validate-network-openstack-openstack-cell1-vlmx8\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:46 crc kubenswrapper[4909]: I1128 18:15:46.521205 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:47 crc kubenswrapper[4909]: I1128 18:15:47.099295 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-vlmx8"] Nov 28 18:15:47 crc kubenswrapper[4909]: I1128 18:15:47.101948 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:15:47 crc kubenswrapper[4909]: I1128 18:15:47.916291 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:15:47 crc kubenswrapper[4909]: E1128 18:15:47.916769 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:15:48 crc kubenswrapper[4909]: I1128 18:15:48.056728 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" event={"ID":"ebd37bf1-01b3-444a-83c4-59ee7f35c529","Type":"ContainerStarted","Data":"091103d37b01273a24c4dca65455b78c5adfc1b93f70ff3a504d583cdec3f510"} Nov 28 18:15:48 crc kubenswrapper[4909]: I1128 18:15:48.057075 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" event={"ID":"ebd37bf1-01b3-444a-83c4-59ee7f35c529","Type":"ContainerStarted","Data":"e709d1c376727fd06dfc6ae39e0946eb1434f2ec7b01b7e96f787db5e5189716"} Nov 28 18:15:48 crc kubenswrapper[4909]: I1128 18:15:48.074116 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" podStartSLOduration=1.4009771469999999 podStartE2EDuration="2.074097336s" podCreationTimestamp="2025-11-28 18:15:46 +0000 UTC" firstStartedPulling="2025-11-28 18:15:47.101570642 +0000 UTC m=+7529.498255166" lastFinishedPulling="2025-11-28 18:15:47.774690831 +0000 UTC m=+7530.171375355" observedRunningTime="2025-11-28 18:15:48.070226302 +0000 UTC m=+7530.466910826" watchObservedRunningTime="2025-11-28 18:15:48.074097336 +0000 UTC m=+7530.470781860" Nov 28 18:15:53 crc kubenswrapper[4909]: I1128 18:15:53.131695 4909 generic.go:334] "Generic (PLEG): container finished" podID="ebd37bf1-01b3-444a-83c4-59ee7f35c529" containerID="091103d37b01273a24c4dca65455b78c5adfc1b93f70ff3a504d583cdec3f510" exitCode=0 Nov 28 18:15:53 crc kubenswrapper[4909]: I1128 18:15:53.131755 4909 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" event={"ID":"ebd37bf1-01b3-444a-83c4-59ee7f35c529","Type":"ContainerDied","Data":"091103d37b01273a24c4dca65455b78c5adfc1b93f70ff3a504d583cdec3f510"} Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.828682 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.881770 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory\") pod \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.881861 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph\") pod \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.882116 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46c2q\" (UniqueName: \"kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q\") pod \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.882170 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") pod \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.890609 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q" (OuterVolumeSpecName: "kube-api-access-46c2q") pod "ebd37bf1-01b3-444a-83c4-59ee7f35c529" (UID: "ebd37bf1-01b3-444a-83c4-59ee7f35c529"). InnerVolumeSpecName "kube-api-access-46c2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.891482 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph" (OuterVolumeSpecName: "ceph") pod "ebd37bf1-01b3-444a-83c4-59ee7f35c529" (UID: "ebd37bf1-01b3-444a-83c4-59ee7f35c529"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:54 crc kubenswrapper[4909]: E1128 18:15:54.914358 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key podName:ebd37bf1-01b3-444a-83c4-59ee7f35c529 nodeName:}" failed. No retries permitted until 2025-11-28 18:15:55.414326567 +0000 UTC m=+7537.811011101 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key") pod "ebd37bf1-01b3-444a-83c4-59ee7f35c529" (UID: "ebd37bf1-01b3-444a-83c4-59ee7f35c529") : error deleting /var/lib/kubelet/pods/ebd37bf1-01b3-444a-83c4-59ee7f35c529/volume-subpaths: remove /var/lib/kubelet/pods/ebd37bf1-01b3-444a-83c4-59ee7f35c529/volume-subpaths: no such file or directory Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.918100 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory" (OuterVolumeSpecName: "inventory") pod "ebd37bf1-01b3-444a-83c4-59ee7f35c529" (UID: "ebd37bf1-01b3-444a-83c4-59ee7f35c529"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.985534 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.985573 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46c2q\" (UniqueName: \"kubernetes.io/projected/ebd37bf1-01b3-444a-83c4-59ee7f35c529-kube-api-access-46c2q\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:54 crc kubenswrapper[4909]: I1128 18:15:54.985590 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.155750 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" event={"ID":"ebd37bf1-01b3-444a-83c4-59ee7f35c529","Type":"ContainerDied","Data":"e709d1c376727fd06dfc6ae39e0946eb1434f2ec7b01b7e96f787db5e5189716"} Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.155799 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e709d1c376727fd06dfc6ae39e0946eb1434f2ec7b01b7e96f787db5e5189716" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.155864 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-vlmx8" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.274359 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2x6vh"] Nov 28 18:15:55 crc kubenswrapper[4909]: E1128 18:15:55.274815 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebd37bf1-01b3-444a-83c4-59ee7f35c529" containerName="validate-network-openstack-openstack-cell1" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.274835 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebd37bf1-01b3-444a-83c4-59ee7f35c529" containerName="validate-network-openstack-openstack-cell1" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.275093 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebd37bf1-01b3-444a-83c4-59ee7f35c529" containerName="validate-network-openstack-openstack-cell1" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.275832 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.312406 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2x6vh"] Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.393721 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.393767 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.393837 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vptd7\" (UniqueName: \"kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.394154 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.496128 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") pod \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\" (UID: \"ebd37bf1-01b3-444a-83c4-59ee7f35c529\") " Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.496618 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.496840 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.497062 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 
18:15:55.497154 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vptd7\" (UniqueName: \"kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.500015 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ebd37bf1-01b3-444a-83c4-59ee7f35c529" (UID: "ebd37bf1-01b3-444a-83c4-59ee7f35c529"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.500072 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.500634 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.500779 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.526988 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vptd7\" (UniqueName: \"kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7\") pod \"install-os-openstack-openstack-cell1-2x6vh\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.596716 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:15:55 crc kubenswrapper[4909]: I1128 18:15:55.599178 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebd37bf1-01b3-444a-83c4-59ee7f35c529-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:56 crc kubenswrapper[4909]: I1128 18:15:56.224215 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2x6vh"] Nov 28 18:15:57 crc kubenswrapper[4909]: I1128 18:15:57.184794 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" event={"ID":"00ea43ac-10c8-4210-a08b-268e72f43f4f","Type":"ContainerStarted","Data":"ae233850822b4db90823f12812fe9309cc91b7314a485ba2b72d7d3325815720"} Nov 28 18:15:57 crc kubenswrapper[4909]: I1128 18:15:57.185461 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" event={"ID":"00ea43ac-10c8-4210-a08b-268e72f43f4f","Type":"ContainerStarted","Data":"e55d24e1de749d1fd65def81ba63866203a97e09d682479c9f8c86261341049d"} Nov 28 18:15:57 crc kubenswrapper[4909]: I1128 18:15:57.213060 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" podStartSLOduration=1.791518388 podStartE2EDuration="2.213036219s" podCreationTimestamp="2025-11-28 18:15:55 +0000 UTC" firstStartedPulling="2025-11-28 18:15:56.226824666 +0000 UTC m=+7538.623509210" lastFinishedPulling="2025-11-28 18:15:56.648342507 +0000 UTC m=+7539.045027041" observedRunningTime="2025-11-28 18:15:57.206287477 +0000 UTC m=+7539.602972021" watchObservedRunningTime="2025-11-28 18:15:57.213036219 +0000 UTC m=+7539.609720753" Nov 28 18:16:00 crc kubenswrapper[4909]: I1128 18:16:00.902788 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:16:00 crc kubenswrapper[4909]: E1128 18:16:00.903926 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:16:14 crc kubenswrapper[4909]: I1128 18:16:14.903218 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:16:14 crc kubenswrapper[4909]: E1128 18:16:14.904521 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:16:28 crc kubenswrapper[4909]: I1128 18:16:28.902285 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:16:28 crc kubenswrapper[4909]: E1128 18:16:28.903216 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:16:39 crc kubenswrapper[4909]: I1128 18:16:39.901628 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:16:39 crc kubenswrapper[4909]: E1128 18:16:39.903232 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:16:46 crc kubenswrapper[4909]: I1128 18:16:46.746396 4909 generic.go:334] "Generic (PLEG): container finished" podID="00ea43ac-10c8-4210-a08b-268e72f43f4f" containerID="ae233850822b4db90823f12812fe9309cc91b7314a485ba2b72d7d3325815720" exitCode=0 Nov 28 18:16:46 crc kubenswrapper[4909]: I1128 18:16:46.746460 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" event={"ID":"00ea43ac-10c8-4210-a08b-268e72f43f4f","Type":"ContainerDied","Data":"ae233850822b4db90823f12812fe9309cc91b7314a485ba2b72d7d3325815720"} Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.435565 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.523424 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph\") pod \"00ea43ac-10c8-4210-a08b-268e72f43f4f\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.523555 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key\") pod \"00ea43ac-10c8-4210-a08b-268e72f43f4f\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.523726 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vptd7\" (UniqueName: \"kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7\") pod \"00ea43ac-10c8-4210-a08b-268e72f43f4f\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.523778 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory\") pod \"00ea43ac-10c8-4210-a08b-268e72f43f4f\" (UID: \"00ea43ac-10c8-4210-a08b-268e72f43f4f\") " Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.529578 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7" (OuterVolumeSpecName: "kube-api-access-vptd7") pod "00ea43ac-10c8-4210-a08b-268e72f43f4f" (UID: "00ea43ac-10c8-4210-a08b-268e72f43f4f"). InnerVolumeSpecName "kube-api-access-vptd7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.537288 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph" (OuterVolumeSpecName: "ceph") pod "00ea43ac-10c8-4210-a08b-268e72f43f4f" (UID: "00ea43ac-10c8-4210-a08b-268e72f43f4f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.567868 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "00ea43ac-10c8-4210-a08b-268e72f43f4f" (UID: "00ea43ac-10c8-4210-a08b-268e72f43f4f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.576337 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory" (OuterVolumeSpecName: "inventory") pod "00ea43ac-10c8-4210-a08b-268e72f43f4f" (UID: "00ea43ac-10c8-4210-a08b-268e72f43f4f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.626002 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.626031 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.626040 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/00ea43ac-10c8-4210-a08b-268e72f43f4f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.626049 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vptd7\" (UniqueName: \"kubernetes.io/projected/00ea43ac-10c8-4210-a08b-268e72f43f4f-kube-api-access-vptd7\") on node \"crc\" DevicePath \"\"" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.791673 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" event={"ID":"00ea43ac-10c8-4210-a08b-268e72f43f4f","Type":"ContainerDied","Data":"e55d24e1de749d1fd65def81ba63866203a97e09d682479c9f8c86261341049d"} Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.791709 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e55d24e1de749d1fd65def81ba63866203a97e09d682479c9f8c86261341049d" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.791742 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2x6vh" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.895506 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-7cgmn"] Nov 28 18:16:48 crc kubenswrapper[4909]: E1128 18:16:48.896050 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00ea43ac-10c8-4210-a08b-268e72f43f4f" containerName="install-os-openstack-openstack-cell1" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.896075 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="00ea43ac-10c8-4210-a08b-268e72f43f4f" containerName="install-os-openstack-openstack-cell1" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.896337 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="00ea43ac-10c8-4210-a08b-268e72f43f4f" containerName="install-os-openstack-openstack-cell1" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.897394 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.899908 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.900002 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.901760 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.901851 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:16:48 crc kubenswrapper[4909]: I1128 18:16:48.907499 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-7cgmn"] Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.033813 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7qn2\" (UniqueName: \"kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.033865 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.033979 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.034096 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.136243 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.136427 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.136694 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7qn2\" (UniqueName: \"kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.136752 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.140880 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.146290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.147556 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.154419 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7qn2\" (UniqueName: \"kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2\") pod \"configure-os-openstack-openstack-cell1-7cgmn\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") " pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 
18:16:49.220244 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.762686 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-7cgmn"] Nov 28 18:16:49 crc kubenswrapper[4909]: W1128 18:16:49.769589 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2268ed7c_96e8_4452_b1fb_babe8572783e.slice/crio-dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27 WatchSource:0}: Error finding container dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27: Status 404 returned error can't find the container with id dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27 Nov 28 18:16:49 crc kubenswrapper[4909]: I1128 18:16:49.802462 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" event={"ID":"2268ed7c-96e8-4452-b1fb-babe8572783e","Type":"ContainerStarted","Data":"dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27"} Nov 28 18:16:50 crc kubenswrapper[4909]: I1128 18:16:50.816126 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" event={"ID":"2268ed7c-96e8-4452-b1fb-babe8572783e","Type":"ContainerStarted","Data":"ededf46d7b44993c318d4561e9dcc5d93e77d2dcb7b60abd9b3379f1d1bdaf0f"} Nov 28 18:16:50 crc kubenswrapper[4909]: I1128 18:16:50.841236 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" podStartSLOduration=2.350582745 podStartE2EDuration="2.841218457s" podCreationTimestamp="2025-11-28 18:16:48 +0000 UTC" firstStartedPulling="2025-11-28 18:16:49.772341602 +0000 UTC m=+7592.169026126" lastFinishedPulling="2025-11-28 18:16:50.262977314 +0000 UTC m=+7592.659661838" observedRunningTime="2025-11-28 18:16:50.831198267 +0000 UTC m=+7593.227882791" watchObservedRunningTime="2025-11-28 18:16:50.841218457 +0000 UTC m=+7593.237902981" Nov 28 18:16:51 crc kubenswrapper[4909]: I1128 18:16:51.902476 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:16:51 crc kubenswrapper[4909]: E1128 18:16:51.904164 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:17:04 crc kubenswrapper[4909]: I1128 18:17:04.902445 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:17:04 crc kubenswrapper[4909]: E1128 18:17:04.903392 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:17:09 crc 
kubenswrapper[4909]: I1128 18:17:09.201459 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"] Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.204104 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.247576 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"] Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.378937 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.379049 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qcts\" (UniqueName: \"kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.379386 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.481724 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.481867 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.481929 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qcts\" (UniqueName: \"kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.482272 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.482289 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.509942 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qcts\" (UniqueName: \"kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts\") pod \"certified-operators-lkdwk\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") " pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:09 crc kubenswrapper[4909]: I1128 18:17:09.548972 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lkdwk" Nov 28 18:17:10 crc kubenswrapper[4909]: I1128 18:17:10.111276 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"] Nov 28 18:17:11 crc kubenswrapper[4909]: I1128 18:17:11.064093 4909 generic.go:334] "Generic (PLEG): container finished" podID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerID="843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01" exitCode=0 Nov 28 18:17:11 crc kubenswrapper[4909]: I1128 18:17:11.064174 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerDied","Data":"843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01"} Nov 28 18:17:11 crc kubenswrapper[4909]: I1128 18:17:11.064823 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerStarted","Data":"3518749f84574fd16c824f0e5a42a7a2277fe823a33a22694cd4774a714604ca"} Nov 28 18:17:12 crc kubenswrapper[4909]: I1128 18:17:12.077345 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerStarted","Data":"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"} Nov 28 18:17:14 crc kubenswrapper[4909]: I1128 18:17:14.101201 4909 generic.go:334] "Generic (PLEG): container finished" podID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerID="849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8" exitCode=0 Nov 28 18:17:14 crc kubenswrapper[4909]: I1128 18:17:14.101271 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerDied","Data":"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"} Nov 28 18:17:15 crc kubenswrapper[4909]: I1128 18:17:15.113589 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerStarted","Data":"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"} Nov 28 18:17:15 crc kubenswrapper[4909]: I1128 18:17:15.142812 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lkdwk" podStartSLOduration=2.684634038 podStartE2EDuration="6.142794083s" podCreationTimestamp="2025-11-28 18:17:09 +0000 UTC" firstStartedPulling="2025-11-28 18:17:11.068831603 +0000 UTC m=+7613.465516127" lastFinishedPulling="2025-11-28 18:17:14.526991648 +0000 
Nov 28 18:17:15 crc kubenswrapper[4909]: I1128 18:17:15.901894 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"
Nov 28 18:17:15 crc kubenswrapper[4909]: E1128 18:17:15.902411 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:17:19 crc kubenswrapper[4909]: I1128 18:17:19.550698 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:19 crc kubenswrapper[4909]: I1128 18:17:19.551477 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:19 crc kubenswrapper[4909]: I1128 18:17:19.637916 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:20 crc kubenswrapper[4909]: I1128 18:17:20.240006 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:20 crc kubenswrapper[4909]: I1128 18:17:20.317231 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"]
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.192121 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lkdwk" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="registry-server" containerID="cri-o://def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3" gracePeriod=2
Nov 28 18:17:22 crc kubenswrapper[4909]: E1128 18:17:22.490190 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89d20210_a1a7_4e58_ac23_055ebb93c212.slice/crio-conmon-def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.738575 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.843869 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content\") pod \"89d20210-a1a7-4e58-ac23-055ebb93c212\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") "
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.844000 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qcts\" (UniqueName: \"kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts\") pod \"89d20210-a1a7-4e58-ac23-055ebb93c212\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") "
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.844093 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities\") pod \"89d20210-a1a7-4e58-ac23-055ebb93c212\" (UID: \"89d20210-a1a7-4e58-ac23-055ebb93c212\") "
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.845161 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities" (OuterVolumeSpecName: "utilities") pod "89d20210-a1a7-4e58-ac23-055ebb93c212" (UID: "89d20210-a1a7-4e58-ac23-055ebb93c212"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.845626 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.850943 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts" (OuterVolumeSpecName: "kube-api-access-9qcts") pod "89d20210-a1a7-4e58-ac23-055ebb93c212" (UID: "89d20210-a1a7-4e58-ac23-055ebb93c212"). InnerVolumeSpecName "kube-api-access-9qcts". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.891481 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89d20210-a1a7-4e58-ac23-055ebb93c212" (UID: "89d20210-a1a7-4e58-ac23-055ebb93c212"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.947453 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89d20210-a1a7-4e58-ac23-055ebb93c212-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:22 crc kubenswrapper[4909]: I1128 18:17:22.947788 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qcts\" (UniqueName: \"kubernetes.io/projected/89d20210-a1a7-4e58-ac23-055ebb93c212-kube-api-access-9qcts\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.301407 4909 generic.go:334] "Generic (PLEG): container finished" podID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerID="def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3" exitCode=0
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.301485 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lkdwk"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.301487 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerDied","Data":"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"}
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.301685 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkdwk" event={"ID":"89d20210-a1a7-4e58-ac23-055ebb93c212","Type":"ContainerDied","Data":"3518749f84574fd16c824f0e5a42a7a2277fe823a33a22694cd4774a714604ca"}
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.301697 4909 scope.go:117] "RemoveContainer" containerID="def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.328088 4909 scope.go:117] "RemoveContainer" containerID="849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.365095 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"]
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.367872 4909 scope.go:117] "RemoveContainer" containerID="843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.378526 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lkdwk"]
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.418371 4909 scope.go:117] "RemoveContainer" containerID="def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"
Nov 28 18:17:23 crc kubenswrapper[4909]: E1128 18:17:23.418905 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3\": container with ID starting with def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3 not found: ID does not exist" containerID="def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.418948 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3"} err="failed to get container status \"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3\": rpc error: code = NotFound desc = could not find container \"def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3\": container with ID starting with def83f09cec569c3ee9d0aec016c56fd39a260089fd552d21d56138bad8d19b3 not found: ID does not exist"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.418974 4909 scope.go:117] "RemoveContainer" containerID="849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"
Nov 28 18:17:23 crc kubenswrapper[4909]: E1128 18:17:23.419291 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8\": container with ID starting with 849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8 not found: ID does not exist" containerID="849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.419318 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8"} err="failed to get container status \"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8\": rpc error: code = NotFound desc = could not find container \"849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8\": container with ID starting with 849f79e41a1504ae8d40df8f1c8df4d711e2f65938cb5148bb213331ddd609c8 not found: ID does not exist"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.419333 4909 scope.go:117] "RemoveContainer" containerID="843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01"
Nov 28 18:17:23 crc kubenswrapper[4909]: E1128 18:17:23.419693 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01\": container with ID starting with 843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01 not found: ID does not exist" containerID="843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.419735 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01"} err="failed to get container status \"843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01\": rpc error: code = NotFound desc = could not find container \"843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01\": container with ID starting with 843db022934f5c901277b8e74f038187d4a25a9fb7d2f1a7e07caa4301dd1a01 not found: ID does not exist"
Nov 28 18:17:23 crc kubenswrapper[4909]: I1128 18:17:23.921175 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" path="/var/lib/kubelet/pods/89d20210-a1a7-4e58-ac23-055ebb93c212/volumes"
Nov 28 18:17:30 crc kubenswrapper[4909]: I1128 18:17:30.901904 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d"
Nov 28 18:17:30 crc kubenswrapper[4909]: E1128 18:17:30.903400 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:17:37 crc kubenswrapper[4909]: I1128 18:17:37.459497 4909 generic.go:334] "Generic (PLEG): container finished" podID="2268ed7c-96e8-4452-b1fb-babe8572783e" containerID="ededf46d7b44993c318d4561e9dcc5d93e77d2dcb7b60abd9b3379f1d1bdaf0f" exitCode=0
Nov 28 18:17:37 crc kubenswrapper[4909]: I1128 18:17:37.459650 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" event={"ID":"2268ed7c-96e8-4452-b1fb-babe8572783e","Type":"ContainerDied","Data":"ededf46d7b44993c318d4561e9dcc5d93e77d2dcb7b60abd9b3379f1d1bdaf0f"}
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.003909 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.151929 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7qn2\" (UniqueName: \"kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2\") pod \"2268ed7c-96e8-4452-b1fb-babe8572783e\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") "
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.152157 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory\") pod \"2268ed7c-96e8-4452-b1fb-babe8572783e\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") "
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.152188 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key\") pod \"2268ed7c-96e8-4452-b1fb-babe8572783e\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") "
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.152229 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph\") pod \"2268ed7c-96e8-4452-b1fb-babe8572783e\" (UID: \"2268ed7c-96e8-4452-b1fb-babe8572783e\") "
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.159448 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2" (OuterVolumeSpecName: "kube-api-access-h7qn2") pod "2268ed7c-96e8-4452-b1fb-babe8572783e" (UID: "2268ed7c-96e8-4452-b1fb-babe8572783e"). InnerVolumeSpecName "kube-api-access-h7qn2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.159544 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph" (OuterVolumeSpecName: "ceph") pod "2268ed7c-96e8-4452-b1fb-babe8572783e" (UID: "2268ed7c-96e8-4452-b1fb-babe8572783e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.192915 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2268ed7c-96e8-4452-b1fb-babe8572783e" (UID: "2268ed7c-96e8-4452-b1fb-babe8572783e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.199783 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory" (OuterVolumeSpecName: "inventory") pod "2268ed7c-96e8-4452-b1fb-babe8572783e" (UID: "2268ed7c-96e8-4452-b1fb-babe8572783e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.254739 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.255101 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.255137 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2268ed7c-96e8-4452-b1fb-babe8572783e-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.255151 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7qn2\" (UniqueName: \"kubernetes.io/projected/2268ed7c-96e8-4452-b1fb-babe8572783e-kube-api-access-h7qn2\") on node \"crc\" DevicePath \"\""
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.488596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn" event={"ID":"2268ed7c-96e8-4452-b1fb-babe8572783e","Type":"ContainerDied","Data":"dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27"}
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.488639 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc0941eb964d1853ccbc19ccfc216eb24826b62e48e83bec74bb3a93788eec27"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.489014 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-7cgmn"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.594752 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-9xrx8"]
Nov 28 18:17:39 crc kubenswrapper[4909]: E1128 18:17:39.595563 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="extract-utilities"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.595617 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="extract-utilities"
Nov 28 18:17:39 crc kubenswrapper[4909]: E1128 18:17:39.595700 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2268ed7c-96e8-4452-b1fb-babe8572783e" containerName="configure-os-openstack-openstack-cell1"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.595721 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2268ed7c-96e8-4452-b1fb-babe8572783e" containerName="configure-os-openstack-openstack-cell1"
Nov 28 18:17:39 crc kubenswrapper[4909]: E1128 18:17:39.595772 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="registry-server"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.595790 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="registry-server"
Nov 28 18:17:39 crc kubenswrapper[4909]: E1128 18:17:39.595837 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="extract-content"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.595857 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="extract-content"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.596354 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="89d20210-a1a7-4e58-ac23-055ebb93c212" containerName="registry-server"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.596405 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2268ed7c-96e8-4452-b1fb-babe8572783e" containerName="configure-os-openstack-openstack-cell1"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.597732 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.601643 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.601810 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.601833 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.602310 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.623901 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-9xrx8"]
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.683511 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrjqv\" (UniqueName: \"kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.683590 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.683635 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.683674 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.785592 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.785712 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.785742 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8"
\"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.785905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrjqv\" (UniqueName: \"kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.789832 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.790136 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.794841 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.811330 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrjqv\" (UniqueName: \"kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv\") pod \"ssh-known-hosts-openstack-9xrx8\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:39 crc kubenswrapper[4909]: I1128 18:17:39.921742 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:40 crc kubenswrapper[4909]: I1128 18:17:40.450423 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-9xrx8"] Nov 28 18:17:40 crc kubenswrapper[4909]: I1128 18:17:40.507019 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-9xrx8" event={"ID":"6e7eacb2-3379-4878-bc25-b818620f471f","Type":"ContainerStarted","Data":"504a09dcb9dc60808805a3986af67fdd791e9757713143a7c5472b765d4da73c"} Nov 28 18:17:41 crc kubenswrapper[4909]: I1128 18:17:41.518929 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-9xrx8" event={"ID":"6e7eacb2-3379-4878-bc25-b818620f471f","Type":"ContainerStarted","Data":"bea65e0e8c3948e6c32fef1320443ae92f6dfef03c25d78f40da1b97deea85ab"} Nov 28 18:17:41 crc kubenswrapper[4909]: I1128 18:17:41.552524 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-9xrx8" podStartSLOduration=1.81664187 podStartE2EDuration="2.552503641s" podCreationTimestamp="2025-11-28 18:17:39 +0000 UTC" firstStartedPulling="2025-11-28 18:17:40.453448993 +0000 UTC m=+7642.850133537" lastFinishedPulling="2025-11-28 18:17:41.189310774 +0000 UTC m=+7643.585995308" observedRunningTime="2025-11-28 18:17:41.540157119 +0000 UTC m=+7643.936841683" watchObservedRunningTime="2025-11-28 18:17:41.552503641 +0000 UTC m=+7643.949188175" Nov 28 18:17:43 crc kubenswrapper[4909]: I1128 18:17:43.901831 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:17:43 crc kubenswrapper[4909]: E1128 18:17:43.902545 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:17:50 crc kubenswrapper[4909]: I1128 18:17:50.630130 4909 generic.go:334] "Generic (PLEG): container finished" podID="6e7eacb2-3379-4878-bc25-b818620f471f" containerID="bea65e0e8c3948e6c32fef1320443ae92f6dfef03c25d78f40da1b97deea85ab" exitCode=0 Nov 28 18:17:50 crc kubenswrapper[4909]: I1128 18:17:50.630249 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-9xrx8" event={"ID":"6e7eacb2-3379-4878-bc25-b818620f471f","Type":"ContainerDied","Data":"bea65e0e8c3948e6c32fef1320443ae92f6dfef03c25d78f40da1b97deea85ab"} Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.241980 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.373166 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrjqv\" (UniqueName: \"kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv\") pod \"6e7eacb2-3379-4878-bc25-b818620f471f\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.373633 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1\") pod \"6e7eacb2-3379-4878-bc25-b818620f471f\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.373823 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0\") pod \"6e7eacb2-3379-4878-bc25-b818620f471f\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.373855 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") pod \"6e7eacb2-3379-4878-bc25-b818620f471f\" (UID: \"6e7eacb2-3379-4878-bc25-b818620f471f\") " Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.381972 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv" (OuterVolumeSpecName: "kube-api-access-lrjqv") pod "6e7eacb2-3379-4878-bc25-b818620f471f" (UID: "6e7eacb2-3379-4878-bc25-b818620f471f"). InnerVolumeSpecName "kube-api-access-lrjqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.382795 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph" (OuterVolumeSpecName: "ceph") pod "6e7eacb2-3379-4878-bc25-b818620f471f" (UID: "6e7eacb2-3379-4878-bc25-b818620f471f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.408806 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "6e7eacb2-3379-4878-bc25-b818620f471f" (UID: "6e7eacb2-3379-4878-bc25-b818620f471f"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.428980 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "6e7eacb2-3379-4878-bc25-b818620f471f" (UID: "6e7eacb2-3379-4878-bc25-b818620f471f"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.477129 4909 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.477186 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.477209 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrjqv\" (UniqueName: \"kubernetes.io/projected/6e7eacb2-3379-4878-bc25-b818620f471f-kube-api-access-lrjqv\") on node \"crc\" DevicePath \"\"" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.477230 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e7eacb2-3379-4878-bc25-b818620f471f-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.662092 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-9xrx8" event={"ID":"6e7eacb2-3379-4878-bc25-b818620f471f","Type":"ContainerDied","Data":"504a09dcb9dc60808805a3986af67fdd791e9757713143a7c5472b765d4da73c"} Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.662148 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="504a09dcb9dc60808805a3986af67fdd791e9757713143a7c5472b765d4da73c" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.662208 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-9xrx8" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.747971 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-lmwjv"] Nov 28 18:17:52 crc kubenswrapper[4909]: E1128 18:17:52.748401 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7eacb2-3379-4878-bc25-b818620f471f" containerName="ssh-known-hosts-openstack" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.748418 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7eacb2-3379-4878-bc25-b818620f471f" containerName="ssh-known-hosts-openstack" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.748723 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e7eacb2-3379-4878-bc25-b818620f471f" containerName="ssh-known-hosts-openstack" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.750650 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.754736 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.754772 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.754796 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.754988 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.768241 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-lmwjv"] Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.783340 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.783401 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.783430 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx9xm\" (UniqueName: \"kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.783488 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.886080 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.886169 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.886213 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cx9xm\" (UniqueName: \"kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.886299 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.890986 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.891092 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.891246 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:52 crc kubenswrapper[4909]: I1128 18:17:52.903411 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx9xm\" (UniqueName: \"kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm\") pod \"run-os-openstack-openstack-cell1-lmwjv\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:53 crc kubenswrapper[4909]: I1128 18:17:53.075517 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:17:53 crc kubenswrapper[4909]: I1128 18:17:53.665205 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-lmwjv"] Nov 28 18:17:54 crc kubenswrapper[4909]: I1128 18:17:54.689114 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" event={"ID":"20cfff51-0619-4abf-9897-6c4add02ace1","Type":"ContainerStarted","Data":"8ca16ea6fef689d9b3d6cd18fe7f60258e7d2a4a39751cb9973c9cb8f0e353f2"} Nov 28 18:17:54 crc kubenswrapper[4909]: I1128 18:17:54.689722 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" event={"ID":"20cfff51-0619-4abf-9897-6c4add02ace1","Type":"ContainerStarted","Data":"d59ab709ab0f2bbedefa31335f5681310f5b7700d7de3b814b8cc303601dfe91"} Nov 28 18:17:54 crc kubenswrapper[4909]: I1128 18:17:54.711569 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" podStartSLOduration=2.150484825 podStartE2EDuration="2.711541235s" podCreationTimestamp="2025-11-28 18:17:52 +0000 UTC" firstStartedPulling="2025-11-28 18:17:53.6823761 +0000 UTC m=+7656.079060624" lastFinishedPulling="2025-11-28 18:17:54.24343249 +0000 UTC m=+7656.640117034" observedRunningTime="2025-11-28 18:17:54.706619783 +0000 UTC m=+7657.103304327" watchObservedRunningTime="2025-11-28 18:17:54.711541235 +0000 UTC m=+7657.108225789" Nov 28 18:17:57 crc kubenswrapper[4909]: I1128 18:17:57.903012 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:17:57 crc kubenswrapper[4909]: E1128 18:17:57.903837 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.296472 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.300675 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.311751 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.336301 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.336450 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.336470 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zpqn\" (UniqueName: \"kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.438906 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.438963 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zpqn\" (UniqueName: \"kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.439113 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.439531 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.439550 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.458231 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6zpqn\" (UniqueName: \"kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn\") pod \"community-operators-6gv67\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:17:59 crc kubenswrapper[4909]: I1128 18:17:59.643005 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:00 crc kubenswrapper[4909]: I1128 18:18:00.196276 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:18:00 crc kubenswrapper[4909]: I1128 18:18:00.764444 4909 generic.go:334] "Generic (PLEG): container finished" podID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerID="697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4" exitCode=0 Nov 28 18:18:00 crc kubenswrapper[4909]: I1128 18:18:00.766478 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerDied","Data":"697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4"} Nov 28 18:18:00 crc kubenswrapper[4909]: I1128 18:18:00.766898 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerStarted","Data":"c48ee66dea2dd99ca844cfe119795c72b226f156c5eaebc9cc458ce752786deb"} Nov 28 18:18:02 crc kubenswrapper[4909]: I1128 18:18:02.796868 4909 generic.go:334] "Generic (PLEG): container finished" podID="20cfff51-0619-4abf-9897-6c4add02ace1" containerID="8ca16ea6fef689d9b3d6cd18fe7f60258e7d2a4a39751cb9973c9cb8f0e353f2" exitCode=0 Nov 28 18:18:02 crc kubenswrapper[4909]: I1128 18:18:02.796968 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" event={"ID":"20cfff51-0619-4abf-9897-6c4add02ace1","Type":"ContainerDied","Data":"8ca16ea6fef689d9b3d6cd18fe7f60258e7d2a4a39751cb9973c9cb8f0e353f2"} Nov 28 18:18:02 crc kubenswrapper[4909]: I1128 18:18:02.800870 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerStarted","Data":"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a"} Nov 28 18:18:03 crc kubenswrapper[4909]: I1128 18:18:03.812015 4909 generic.go:334] "Generic (PLEG): container finished" podID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerID="f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a" exitCode=0 Nov 28 18:18:03 crc kubenswrapper[4909]: I1128 18:18:03.812117 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerDied","Data":"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a"} Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.323512 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.386866 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx9xm\" (UniqueName: \"kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm\") pod \"20cfff51-0619-4abf-9897-6c4add02ace1\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.386907 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key\") pod \"20cfff51-0619-4abf-9897-6c4add02ace1\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.386940 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory\") pod \"20cfff51-0619-4abf-9897-6c4add02ace1\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.386956 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph\") pod \"20cfff51-0619-4abf-9897-6c4add02ace1\" (UID: \"20cfff51-0619-4abf-9897-6c4add02ace1\") " Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.393547 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph" (OuterVolumeSpecName: "ceph") pod "20cfff51-0619-4abf-9897-6c4add02ace1" (UID: "20cfff51-0619-4abf-9897-6c4add02ace1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.393629 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm" (OuterVolumeSpecName: "kube-api-access-cx9xm") pod "20cfff51-0619-4abf-9897-6c4add02ace1" (UID: "20cfff51-0619-4abf-9897-6c4add02ace1"). InnerVolumeSpecName "kube-api-access-cx9xm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.423261 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "20cfff51-0619-4abf-9897-6c4add02ace1" (UID: "20cfff51-0619-4abf-9897-6c4add02ace1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.425831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory" (OuterVolumeSpecName: "inventory") pod "20cfff51-0619-4abf-9897-6c4add02ace1" (UID: "20cfff51-0619-4abf-9897-6c4add02ace1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.489676 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx9xm\" (UniqueName: \"kubernetes.io/projected/20cfff51-0619-4abf-9897-6c4add02ace1-kube-api-access-cx9xm\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.489726 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.489740 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.489752 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20cfff51-0619-4abf-9897-6c4add02ace1-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.824973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" event={"ID":"20cfff51-0619-4abf-9897-6c4add02ace1","Type":"ContainerDied","Data":"d59ab709ab0f2bbedefa31335f5681310f5b7700d7de3b814b8cc303601dfe91"} Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.826189 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d59ab709ab0f2bbedefa31335f5681310f5b7700d7de3b814b8cc303601dfe91" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.825032 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-lmwjv" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.828905 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerStarted","Data":"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3"} Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.864166 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6gv67" podStartSLOduration=2.2299505809999998 podStartE2EDuration="5.864130739s" podCreationTimestamp="2025-11-28 18:17:59 +0000 UTC" firstStartedPulling="2025-11-28 18:18:00.768651589 +0000 UTC m=+7663.165336143" lastFinishedPulling="2025-11-28 18:18:04.402831767 +0000 UTC m=+7666.799516301" observedRunningTime="2025-11-28 18:18:04.85342071 +0000 UTC m=+7667.250105264" watchObservedRunningTime="2025-11-28 18:18:04.864130739 +0000 UTC m=+7667.260815253" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.904373 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-8zfjh"] Nov 28 18:18:04 crc kubenswrapper[4909]: E1128 18:18:04.906009 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20cfff51-0619-4abf-9897-6c4add02ace1" containerName="run-os-openstack-openstack-cell1" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.906050 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20cfff51-0619-4abf-9897-6c4add02ace1" containerName="run-os-openstack-openstack-cell1" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.906452 4909 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="20cfff51-0619-4abf-9897-6c4add02ace1" containerName="run-os-openstack-openstack-cell1" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.907373 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.914272 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.914430 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.914758 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.915276 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:18:04 crc kubenswrapper[4909]: I1128 18:18:04.918768 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-8zfjh"] Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.004700 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82llm\" (UniqueName: \"kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.004756 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.005226 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.005529 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.107249 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82llm\" (UniqueName: \"kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.107297 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key\") pod 
\"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.107395 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.107468 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.110902 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.114091 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.116294 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.127455 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82llm\" (UniqueName: \"kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm\") pod \"reboot-os-openstack-openstack-cell1-8zfjh\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.292350 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:05 crc kubenswrapper[4909]: W1128 18:18:05.918012 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2328b5_fd31_4da6_bd22_401dee11788e.slice/crio-548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521 WatchSource:0}: Error finding container 548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521: Status 404 returned error can't find the container with id 548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521 Nov 28 18:18:05 crc kubenswrapper[4909]: I1128 18:18:05.920392 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-8zfjh"] Nov 28 18:18:06 crc kubenswrapper[4909]: I1128 18:18:06.849674 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" event={"ID":"7b2328b5-fd31-4da6-bd22-401dee11788e","Type":"ContainerStarted","Data":"d252d585fa0aa252491cee128d0d2d0123eb9faaf2a428e52831d25e3bfa666b"} Nov 28 18:18:06 crc kubenswrapper[4909]: I1128 18:18:06.850006 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" event={"ID":"7b2328b5-fd31-4da6-bd22-401dee11788e","Type":"ContainerStarted","Data":"548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521"} Nov 28 18:18:06 crc kubenswrapper[4909]: I1128 18:18:06.877329 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" podStartSLOduration=2.39592651 podStartE2EDuration="2.877309833s" podCreationTimestamp="2025-11-28 18:18:04 +0000 UTC" firstStartedPulling="2025-11-28 18:18:05.922901642 +0000 UTC m=+7668.319586166" lastFinishedPulling="2025-11-28 18:18:06.404284945 +0000 UTC m=+7668.800969489" observedRunningTime="2025-11-28 18:18:06.863918642 +0000 UTC m=+7669.260603166" watchObservedRunningTime="2025-11-28 18:18:06.877309833 +0000 UTC m=+7669.273994357" Nov 28 18:18:09 crc kubenswrapper[4909]: I1128 18:18:09.643140 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:09 crc kubenswrapper[4909]: I1128 18:18:09.643582 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:09 crc kubenswrapper[4909]: I1128 18:18:09.698969 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:09 crc kubenswrapper[4909]: I1128 18:18:09.938352 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:09 crc kubenswrapper[4909]: I1128 18:18:09.990442 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:18:11 crc kubenswrapper[4909]: I1128 18:18:11.910518 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6gv67" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="registry-server" containerID="cri-o://8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3" gracePeriod=2 Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.634485 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.664708 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content\") pod \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.664946 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zpqn\" (UniqueName: \"kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn\") pod \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.665003 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities\") pod \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\" (UID: \"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3\") " Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.666081 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities" (OuterVolumeSpecName: "utilities") pod "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" (UID: "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.672297 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn" (OuterVolumeSpecName: "kube-api-access-6zpqn") pod "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" (UID: "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3"). InnerVolumeSpecName "kube-api-access-6zpqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.748987 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" (UID: "d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.768136 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.768167 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zpqn\" (UniqueName: \"kubernetes.io/projected/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-kube-api-access-6zpqn\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.768178 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.902544 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:18:12 crc kubenswrapper[4909]: E1128 18:18:12.903021 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.922570 4909 generic.go:334] "Generic (PLEG): container finished" podID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerID="8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3" exitCode=0 Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.922706 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gv67" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.922726 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerDied","Data":"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3"} Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.923167 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gv67" event={"ID":"d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3","Type":"ContainerDied","Data":"c48ee66dea2dd99ca844cfe119795c72b226f156c5eaebc9cc458ce752786deb"} Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.923359 4909 scope.go:117] "RemoveContainer" containerID="8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.965691 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.966031 4909 scope.go:117] "RemoveContainer" containerID="f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a" Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.975874 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6gv67"] Nov 28 18:18:12 crc kubenswrapper[4909]: I1128 18:18:12.999940 4909 scope.go:117] "RemoveContainer" containerID="697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.034773 4909 scope.go:117] "RemoveContainer" containerID="8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3" Nov 28 18:18:13 crc kubenswrapper[4909]: E1128 18:18:13.035163 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3\": container with ID starting with 8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3 not found: ID does not exist" containerID="8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.035203 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3"} err="failed to get container status \"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3\": rpc error: code = NotFound desc = could not find container \"8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3\": container with ID starting with 8e84399317c8e2d970d6faa1665e0a5650c2f122eb83941319e6006ca49191a3 not found: ID does not exist" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.035224 4909 scope.go:117] "RemoveContainer" containerID="f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a" Nov 28 18:18:13 crc kubenswrapper[4909]: E1128 18:18:13.035950 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a\": container with ID starting with f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a not found: ID does not exist" containerID="f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.035996 4909 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a"} err="failed to get container status \"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a\": rpc error: code = NotFound desc = could not find container \"f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a\": container with ID starting with f9cd65a57341e80187d3b6018ea8aacf864553b6dd881c264d50f199f5eecd2a not found: ID does not exist" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.036021 4909 scope.go:117] "RemoveContainer" containerID="697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4" Nov 28 18:18:13 crc kubenswrapper[4909]: E1128 18:18:13.036299 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4\": container with ID starting with 697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4 not found: ID does not exist" containerID="697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.036325 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4"} err="failed to get container status \"697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4\": rpc error: code = NotFound desc = could not find container \"697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4\": container with ID starting with 697ba4558496739f5992aceb724f2fb38a822ab46b761c5cd94fc823bcedeff4 not found: ID does not exist" Nov 28 18:18:13 crc kubenswrapper[4909]: I1128 18:18:13.922975 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" path="/var/lib/kubelet/pods/d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3/volumes" Nov 28 18:18:23 crc kubenswrapper[4909]: I1128 18:18:23.036283 4909 generic.go:334] "Generic (PLEG): container finished" podID="7b2328b5-fd31-4da6-bd22-401dee11788e" containerID="d252d585fa0aa252491cee128d0d2d0123eb9faaf2a428e52831d25e3bfa666b" exitCode=0 Nov 28 18:18:23 crc kubenswrapper[4909]: I1128 18:18:23.036373 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" event={"ID":"7b2328b5-fd31-4da6-bd22-401dee11788e","Type":"ContainerDied","Data":"d252d585fa0aa252491cee128d0d2d0123eb9faaf2a428e52831d25e3bfa666b"} Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.616246 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.747700 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key\") pod \"7b2328b5-fd31-4da6-bd22-401dee11788e\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.747813 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory\") pod \"7b2328b5-fd31-4da6-bd22-401dee11788e\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.747922 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82llm\" (UniqueName: \"kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm\") pod \"7b2328b5-fd31-4da6-bd22-401dee11788e\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.748057 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph\") pod \"7b2328b5-fd31-4da6-bd22-401dee11788e\" (UID: \"7b2328b5-fd31-4da6-bd22-401dee11788e\") " Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.755352 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm" (OuterVolumeSpecName: "kube-api-access-82llm") pod "7b2328b5-fd31-4da6-bd22-401dee11788e" (UID: "7b2328b5-fd31-4da6-bd22-401dee11788e"). InnerVolumeSpecName "kube-api-access-82llm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.755865 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph" (OuterVolumeSpecName: "ceph") pod "7b2328b5-fd31-4da6-bd22-401dee11788e" (UID: "7b2328b5-fd31-4da6-bd22-401dee11788e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.780923 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7b2328b5-fd31-4da6-bd22-401dee11788e" (UID: "7b2328b5-fd31-4da6-bd22-401dee11788e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.802636 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory" (OuterVolumeSpecName: "inventory") pod "7b2328b5-fd31-4da6-bd22-401dee11788e" (UID: "7b2328b5-fd31-4da6-bd22-401dee11788e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.851609 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.851647 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.851728 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82llm\" (UniqueName: \"kubernetes.io/projected/7b2328b5-fd31-4da6-bd22-401dee11788e-kube-api-access-82llm\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:24 crc kubenswrapper[4909]: I1128 18:18:24.851740 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7b2328b5-fd31-4da6-bd22-401dee11788e-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.070910 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" event={"ID":"7b2328b5-fd31-4da6-bd22-401dee11788e","Type":"ContainerDied","Data":"548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521"} Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.070990 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548d02564e2eb2703faafe8b6ee679142a1419438366c834b7332e1c96c3b521" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.071045 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-8zfjh" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.258774 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-rg4b9"] Nov 28 18:18:25 crc kubenswrapper[4909]: E1128 18:18:25.259155 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b2328b5-fd31-4da6-bd22-401dee11788e" containerName="reboot-os-openstack-openstack-cell1" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259174 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b2328b5-fd31-4da6-bd22-401dee11788e" containerName="reboot-os-openstack-openstack-cell1" Nov 28 18:18:25 crc kubenswrapper[4909]: E1128 18:18:25.259194 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="registry-server" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259201 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="registry-server" Nov 28 18:18:25 crc kubenswrapper[4909]: E1128 18:18:25.259218 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="extract-utilities" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259225 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="extract-utilities" Nov 28 18:18:25 crc kubenswrapper[4909]: E1128 18:18:25.259237 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="extract-content" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259243 4909 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="extract-content" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259474 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7e7943c-0fc4-45c1-81a9-5d206ad5e9f3" containerName="registry-server" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.259488 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b2328b5-fd31-4da6-bd22-401dee11788e" containerName="reboot-os-openstack-openstack-cell1" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.260282 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.264183 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.264339 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.264344 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.264627 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.269012 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-rg4b9"] Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363014 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363096 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363193 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363521 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhlsc\" (UniqueName: \"kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363649 4909 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363778 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.363858 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.364021 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.364043 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.364120 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.364184 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.364215 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467182 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467341 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhlsc\" (UniqueName: \"kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467407 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467471 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467530 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467602 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467638 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467723 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467778 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467815 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467929 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.467998 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.475930 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.475940 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.476108 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.478504 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.480292 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.480433 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.481052 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.481532 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.482982 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.483914 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.501542 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.503281 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhlsc\" (UniqueName: \"kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc\") pod \"install-certs-openstack-openstack-cell1-rg4b9\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " pod="openstack/install-certs-openstack-openstack-cell1-rg4b9"
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:25 crc kubenswrapper[4909]: I1128 18:18:25.902167 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:18:25 crc kubenswrapper[4909]: E1128 18:18:25.902822 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:18:26 crc kubenswrapper[4909]: I1128 18:18:26.244292 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-rg4b9"] Nov 28 18:18:27 crc kubenswrapper[4909]: I1128 18:18:27.094584 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" event={"ID":"0c3aabae-25df-4037-9383-d8affbcd3674","Type":"ContainerStarted","Data":"8dcb3f51b32feaca220a3fb4e141cfc91b59ac9664eb575ed2b7b2ff6babae58"} Nov 28 18:18:27 crc kubenswrapper[4909]: I1128 18:18:27.094999 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" event={"ID":"0c3aabae-25df-4037-9383-d8affbcd3674","Type":"ContainerStarted","Data":"e7c14919058a9a04eca62a2ca2b8557836ec6609d4d4386e470620bd5e4ba7f6"} Nov 28 18:18:27 crc kubenswrapper[4909]: I1128 18:18:27.127985 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" podStartSLOduration=1.629012992 podStartE2EDuration="2.127963699s" podCreationTimestamp="2025-11-28 18:18:25 +0000 UTC" firstStartedPulling="2025-11-28 18:18:26.235928709 +0000 UTC m=+7688.632613233" lastFinishedPulling="2025-11-28 18:18:26.734879386 +0000 UTC m=+7689.131563940" observedRunningTime="2025-11-28 18:18:27.126714205 +0000 UTC m=+7689.523398759" watchObservedRunningTime="2025-11-28 18:18:27.127963699 +0000 UTC m=+7689.524648233" Nov 28 18:18:37 crc kubenswrapper[4909]: I1128 18:18:37.912333 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:18:37 crc kubenswrapper[4909]: E1128 18:18:37.913386 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:18:46 crc kubenswrapper[4909]: I1128 18:18:46.330755 4909 generic.go:334] "Generic (PLEG): container finished" podID="0c3aabae-25df-4037-9383-d8affbcd3674" containerID="8dcb3f51b32feaca220a3fb4e141cfc91b59ac9664eb575ed2b7b2ff6babae58" exitCode=0 Nov 28 18:18:46 crc kubenswrapper[4909]: I1128 18:18:46.331303 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" event={"ID":"0c3aabae-25df-4037-9383-d8affbcd3674","Type":"ContainerDied","Data":"8dcb3f51b32feaca220a3fb4e141cfc91b59ac9664eb575ed2b7b2ff6babae58"} Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 
18:18:47.834015 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.848356 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.848633 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.848699 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhlsc\" (UniqueName: \"kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.848734 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.848788 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.858908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc" (OuterVolumeSpecName: "kube-api-access-fhlsc") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "kube-api-access-fhlsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.859616 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.861085 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.866703 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph" (OuterVolumeSpecName: "ceph") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.919397 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory" (OuterVolumeSpecName: "inventory") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.949880 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.949926 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.949955 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.949984 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950006 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950133 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950150 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle\") pod \"0c3aabae-25df-4037-9383-d8affbcd3674\" (UID: \"0c3aabae-25df-4037-9383-d8affbcd3674\") " Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 
18:18:47.950490 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950507 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950516 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhlsc\" (UniqueName: \"kubernetes.io/projected/0c3aabae-25df-4037-9383-d8affbcd3674-kube-api-access-fhlsc\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950526 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.950537 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.953373 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.953393 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.953416 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.953800 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.954069 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.954792 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:47 crc kubenswrapper[4909]: I1128 18:18:47.977602 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0c3aabae-25df-4037-9383-d8affbcd3674" (UID: "0c3aabae-25df-4037-9383-d8affbcd3674"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051814 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051844 4909 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051854 4909 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051863 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051874 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051885 4909 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.051894 4909 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3aabae-25df-4037-9383-d8affbcd3674-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.365307 4909 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" event={"ID":"0c3aabae-25df-4037-9383-d8affbcd3674","Type":"ContainerDied","Data":"e7c14919058a9a04eca62a2ca2b8557836ec6609d4d4386e470620bd5e4ba7f6"} Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.365349 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7c14919058a9a04eca62a2ca2b8557836ec6609d4d4386e470620bd5e4ba7f6" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.365361 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-rg4b9" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.449926 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-kj8w5"] Nov 28 18:18:48 crc kubenswrapper[4909]: E1128 18:18:48.450311 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c3aabae-25df-4037-9383-d8affbcd3674" containerName="install-certs-openstack-openstack-cell1" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.450333 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3aabae-25df-4037-9383-d8affbcd3674" containerName="install-certs-openstack-openstack-cell1" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.450585 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c3aabae-25df-4037-9383-d8affbcd3674" containerName="install-certs-openstack-openstack-cell1" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.451286 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.453538 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.453717 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.453734 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.454780 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.490347 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj2kq\" (UniqueName: \"kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.490388 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.490408 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph\") pod 
\"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.490546 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.495441 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-kj8w5"] Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.592499 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj2kq\" (UniqueName: \"kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.592540 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.592562 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.592635 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.597230 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.599021 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.607848 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " 
pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.609035 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj2kq\" (UniqueName: \"kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq\") pod \"ceph-client-openstack-openstack-cell1-kj8w5\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:48 crc kubenswrapper[4909]: I1128 18:18:48.803331 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:49 crc kubenswrapper[4909]: I1128 18:18:49.339906 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-kj8w5"] Nov 28 18:18:49 crc kubenswrapper[4909]: I1128 18:18:49.379236 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" event={"ID":"54f8470a-3a05-43ec-9e56-d2cf963b71bb","Type":"ContainerStarted","Data":"3aa27b3c6c78f473be8cfcd94e0cdf3dea68e0dca4e7591b16d38c3264dd95a5"} Nov 28 18:18:50 crc kubenswrapper[4909]: I1128 18:18:50.391325 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" event={"ID":"54f8470a-3a05-43ec-9e56-d2cf963b71bb","Type":"ContainerStarted","Data":"096e5e470bd5616f08f50c95fbe31847f13a5757e2a97a31eab490483cbb7c7a"} Nov 28 18:18:50 crc kubenswrapper[4909]: I1128 18:18:50.414243 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" podStartSLOduration=1.913917461 podStartE2EDuration="2.414223804s" podCreationTimestamp="2025-11-28 18:18:48 +0000 UTC" firstStartedPulling="2025-11-28 18:18:49.346809308 +0000 UTC m=+7711.743493842" lastFinishedPulling="2025-11-28 18:18:49.847115651 +0000 UTC m=+7712.243800185" observedRunningTime="2025-11-28 18:18:50.405020146 +0000 UTC m=+7712.801704670" watchObservedRunningTime="2025-11-28 18:18:50.414223804 +0000 UTC m=+7712.810908328" Nov 28 18:18:51 crc kubenswrapper[4909]: I1128 18:18:51.901804 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:18:51 crc kubenswrapper[4909]: E1128 18:18:51.902690 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:18:55 crc kubenswrapper[4909]: E1128 18:18:55.242394 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54f8470a_3a05_43ec_9e56_d2cf963b71bb.slice/crio-096e5e470bd5616f08f50c95fbe31847f13a5757e2a97a31eab490483cbb7c7a.scope\": RecentStats: unable to find data in memory cache]" Nov 28 18:18:55 crc kubenswrapper[4909]: I1128 18:18:55.445634 4909 generic.go:334] "Generic (PLEG): container finished" podID="54f8470a-3a05-43ec-9e56-d2cf963b71bb" containerID="096e5e470bd5616f08f50c95fbe31847f13a5757e2a97a31eab490483cbb7c7a" exitCode=0 Nov 28 18:18:55 crc kubenswrapper[4909]: I1128 
18:18:55.445709 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" event={"ID":"54f8470a-3a05-43ec-9e56-d2cf963b71bb","Type":"ContainerDied","Data":"096e5e470bd5616f08f50c95fbe31847f13a5757e2a97a31eab490483cbb7c7a"} Nov 28 18:18:56 crc kubenswrapper[4909]: I1128 18:18:56.901811 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.001697 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key\") pod \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.001762 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj2kq\" (UniqueName: \"kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq\") pod \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.001809 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory\") pod \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.001876 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph\") pod \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\" (UID: \"54f8470a-3a05-43ec-9e56-d2cf963b71bb\") " Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.007831 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq" (OuterVolumeSpecName: "kube-api-access-vj2kq") pod "54f8470a-3a05-43ec-9e56-d2cf963b71bb" (UID: "54f8470a-3a05-43ec-9e56-d2cf963b71bb"). InnerVolumeSpecName "kube-api-access-vj2kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.010844 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph" (OuterVolumeSpecName: "ceph") pod "54f8470a-3a05-43ec-9e56-d2cf963b71bb" (UID: "54f8470a-3a05-43ec-9e56-d2cf963b71bb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.031738 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "54f8470a-3a05-43ec-9e56-d2cf963b71bb" (UID: "54f8470a-3a05-43ec-9e56-d2cf963b71bb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.041022 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory" (OuterVolumeSpecName: "inventory") pod "54f8470a-3a05-43ec-9e56-d2cf963b71bb" (UID: "54f8470a-3a05-43ec-9e56-d2cf963b71bb"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.105144 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.105181 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj2kq\" (UniqueName: \"kubernetes.io/projected/54f8470a-3a05-43ec-9e56-d2cf963b71bb-kube-api-access-vj2kq\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.105193 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.105201 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54f8470a-3a05-43ec-9e56-d2cf963b71bb-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.467718 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" event={"ID":"54f8470a-3a05-43ec-9e56-d2cf963b71bb","Type":"ContainerDied","Data":"3aa27b3c6c78f473be8cfcd94e0cdf3dea68e0dca4e7591b16d38c3264dd95a5"} Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.467791 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-kj8w5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.467810 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3aa27b3c6c78f473be8cfcd94e0cdf3dea68e0dca4e7591b16d38c3264dd95a5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.623778 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-qgjl5"] Nov 28 18:18:57 crc kubenswrapper[4909]: E1128 18:18:57.624242 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54f8470a-3a05-43ec-9e56-d2cf963b71bb" containerName="ceph-client-openstack-openstack-cell1" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.624263 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="54f8470a-3a05-43ec-9e56-d2cf963b71bb" containerName="ceph-client-openstack-openstack-cell1" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.624504 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="54f8470a-3a05-43ec-9e56-d2cf963b71bb" containerName="ceph-client-openstack-openstack-cell1" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.625234 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.627392 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.627847 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.628232 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.628443 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.628815 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.650184 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-qgjl5"] Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716230 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716335 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716390 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716573 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsfwq\" (UniqueName: \"kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716615 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.716892 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: 
\"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.820451 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.820726 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.820804 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.820878 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.820963 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsfwq\" (UniqueName: \"kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.821022 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.821744 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.827548 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.828532 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph\") pod 
\"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.832295 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.833229 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.852138 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsfwq\" (UniqueName: \"kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq\") pod \"ovn-openstack-openstack-cell1-qgjl5\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:57 crc kubenswrapper[4909]: I1128 18:18:57.945284 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:18:58 crc kubenswrapper[4909]: I1128 18:18:58.511033 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-qgjl5"] Nov 28 18:18:59 crc kubenswrapper[4909]: I1128 18:18:59.492264 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" event={"ID":"c51d51e8-794d-433f-940e-5d12bff09057","Type":"ContainerStarted","Data":"84520e6aceceab9cd2f1376b7fa522f76e4fa754e9877bbae7e67e667d8d1744"} Nov 28 18:18:59 crc kubenswrapper[4909]: I1128 18:18:59.492512 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" event={"ID":"c51d51e8-794d-433f-940e-5d12bff09057","Type":"ContainerStarted","Data":"c637f86c4ee61a44fc7c5d557897767266441d02fcd9af804a62c9be112f0f48"} Nov 28 18:18:59 crc kubenswrapper[4909]: I1128 18:18:59.511283 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" podStartSLOduration=1.851640475 podStartE2EDuration="2.511266531s" podCreationTimestamp="2025-11-28 18:18:57 +0000 UTC" firstStartedPulling="2025-11-28 18:18:58.514356445 +0000 UTC m=+7720.911040969" lastFinishedPulling="2025-11-28 18:18:59.173982491 +0000 UTC m=+7721.570667025" observedRunningTime="2025-11-28 18:18:59.510749197 +0000 UTC m=+7721.907433761" watchObservedRunningTime="2025-11-28 18:18:59.511266531 +0000 UTC m=+7721.907951055" Nov 28 18:19:02 crc kubenswrapper[4909]: I1128 18:19:02.901805 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:19:02 crc kubenswrapper[4909]: E1128 18:19:02.902360 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:19:15 crc kubenswrapper[4909]: I1128 18:19:15.902586 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:19:15 crc kubenswrapper[4909]: E1128 18:19:15.903513 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:19:27 crc kubenswrapper[4909]: I1128 18:19:27.913854 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:19:28 crc kubenswrapper[4909]: I1128 18:19:28.847747 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74"} Nov 28 18:20:09 crc kubenswrapper[4909]: I1128 18:20:09.296804 4909 generic.go:334] "Generic (PLEG): container finished" podID="c51d51e8-794d-433f-940e-5d12bff09057" containerID="84520e6aceceab9cd2f1376b7fa522f76e4fa754e9877bbae7e67e667d8d1744" exitCode=0 Nov 28 18:20:09 crc kubenswrapper[4909]: I1128 18:20:09.296903 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" event={"ID":"c51d51e8-794d-433f-940e-5d12bff09057","Type":"ContainerDied","Data":"84520e6aceceab9cd2f1376b7fa522f76e4fa754e9877bbae7e67e667d8d1744"} Nov 28 18:20:10 crc kubenswrapper[4909]: I1128 18:20:10.826638 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002273 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002342 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002490 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002582 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsfwq\" (UniqueName: \"kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002607 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.002642 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0\") pod \"c51d51e8-794d-433f-940e-5d12bff09057\" (UID: \"c51d51e8-794d-433f-940e-5d12bff09057\") " Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.007874 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph" (OuterVolumeSpecName: "ceph") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.008511 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq" (OuterVolumeSpecName: "kube-api-access-qsfwq") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "kube-api-access-qsfwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.008676 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.032341 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.040136 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory" (OuterVolumeSpecName: "inventory") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.043407 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "c51d51e8-794d-433f-940e-5d12bff09057" (UID: "c51d51e8-794d-433f-940e-5d12bff09057"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.104985 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsfwq\" (UniqueName: \"kubernetes.io/projected/c51d51e8-794d-433f-940e-5d12bff09057-kube-api-access-qsfwq\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.105017 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.105026 4909 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c51d51e8-794d-433f-940e-5d12bff09057-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.105035 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.105045 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.105055 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c51d51e8-794d-433f-940e-5d12bff09057-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.321973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" event={"ID":"c51d51e8-794d-433f-940e-5d12bff09057","Type":"ContainerDied","Data":"c637f86c4ee61a44fc7c5d557897767266441d02fcd9af804a62c9be112f0f48"} Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.322019 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c637f86c4ee61a44fc7c5d557897767266441d02fcd9af804a62c9be112f0f48" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.322023 
4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-qgjl5" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.424377 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5rpbd"] Nov 28 18:20:11 crc kubenswrapper[4909]: E1128 18:20:11.424834 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c51d51e8-794d-433f-940e-5d12bff09057" containerName="ovn-openstack-openstack-cell1" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.424852 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="c51d51e8-794d-433f-940e-5d12bff09057" containerName="ovn-openstack-openstack-cell1" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.425064 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="c51d51e8-794d-433f-940e-5d12bff09057" containerName="ovn-openstack-openstack-cell1" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.425970 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.428704 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.430735 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.430793 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.431492 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.431634 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.435397 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.439597 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5rpbd"] Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512649 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512732 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512760 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512869 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpqpr\" (UniqueName: \"kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512945 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512980 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.512996 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614393 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614455 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614479 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614552 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-qpqpr\" (UniqueName: \"kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614630 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614761 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.614786 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.620482 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.620502 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.620545 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.620951 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.621396 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.623486 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.637294 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpqpr\" (UniqueName: \"kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr\") pod \"neutron-metadata-openstack-openstack-cell1-5rpbd\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:11 crc kubenswrapper[4909]: I1128 18:20:11.742407 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:20:12 crc kubenswrapper[4909]: I1128 18:20:12.337349 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5rpbd"] Nov 28 18:20:13 crc kubenswrapper[4909]: I1128 18:20:13.349453 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" event={"ID":"38f1fa07-931f-4994-a9da-219f6464f5ca","Type":"ContainerStarted","Data":"ba5c60a23304ed5d44c37c21e18e5ad1e1d5e9ad0051809d59923097883b152e"} Nov 28 18:20:13 crc kubenswrapper[4909]: I1128 18:20:13.349943 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" event={"ID":"38f1fa07-931f-4994-a9da-219f6464f5ca","Type":"ContainerStarted","Data":"a4ea31d18f3352dfc099fd003d8c178be5f547abe5bf1cae93d7b21432c35bfd"} Nov 28 18:20:13 crc kubenswrapper[4909]: I1128 18:20:13.379252 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" podStartSLOduration=1.712510449 podStartE2EDuration="2.379234377s" podCreationTimestamp="2025-11-28 18:20:11 +0000 UTC" firstStartedPulling="2025-11-28 18:20:12.338424759 +0000 UTC m=+7794.735109283" lastFinishedPulling="2025-11-28 18:20:13.005148647 +0000 UTC m=+7795.401833211" observedRunningTime="2025-11-28 18:20:13.373526393 +0000 UTC m=+7795.770210917" watchObservedRunningTime="2025-11-28 18:20:13.379234377 +0000 UTC m=+7795.775918891" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.717175 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.721176 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.734221 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.813980 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.814080 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9x6c\" (UniqueName: \"kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.814196 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.916159 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.916249 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9x6c\" (UniqueName: \"kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.916425 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.917093 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.917365 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:47 crc kubenswrapper[4909]: I1128 18:20:47.940517 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-r9x6c\" (UniqueName: \"kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c\") pod \"redhat-marketplace-7qspl\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:48 crc kubenswrapper[4909]: I1128 18:20:48.103620 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:48 crc kubenswrapper[4909]: I1128 18:20:48.830759 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:20:49 crc kubenswrapper[4909]: I1128 18:20:49.731638 4909 generic.go:334] "Generic (PLEG): container finished" podID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerID="301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c" exitCode=0 Nov 28 18:20:49 crc kubenswrapper[4909]: I1128 18:20:49.731832 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerDied","Data":"301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c"} Nov 28 18:20:49 crc kubenswrapper[4909]: I1128 18:20:49.732130 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerStarted","Data":"4ec58f19fb6d567c5b9d3ddef1c42787607c2e8b9acd3b6018e0de1c334ed390"} Nov 28 18:20:49 crc kubenswrapper[4909]: I1128 18:20:49.735537 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:20:51 crc kubenswrapper[4909]: I1128 18:20:51.758965 4909 generic.go:334] "Generic (PLEG): container finished" podID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerID="5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64" exitCode=0 Nov 28 18:20:51 crc kubenswrapper[4909]: I1128 18:20:51.759172 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerDied","Data":"5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64"} Nov 28 18:20:52 crc kubenswrapper[4909]: I1128 18:20:52.776530 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerStarted","Data":"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f"} Nov 28 18:20:52 crc kubenswrapper[4909]: I1128 18:20:52.806999 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7qspl" podStartSLOduration=3.28264291 podStartE2EDuration="5.806974247s" podCreationTimestamp="2025-11-28 18:20:47 +0000 UTC" firstStartedPulling="2025-11-28 18:20:49.735214873 +0000 UTC m=+7832.131899407" lastFinishedPulling="2025-11-28 18:20:52.25954622 +0000 UTC m=+7834.656230744" observedRunningTime="2025-11-28 18:20:52.803509433 +0000 UTC m=+7835.200193967" watchObservedRunningTime="2025-11-28 18:20:52.806974247 +0000 UTC m=+7835.203658771" Nov 28 18:20:58 crc kubenswrapper[4909]: I1128 18:20:58.103989 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:58 crc kubenswrapper[4909]: I1128 18:20:58.104562 4909 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:58 crc kubenswrapper[4909]: I1128 18:20:58.159917 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:58 crc kubenswrapper[4909]: I1128 18:20:58.900776 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:20:58 crc kubenswrapper[4909]: I1128 18:20:58.950288 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:21:00 crc kubenswrapper[4909]: I1128 18:21:00.861126 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7qspl" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="registry-server" containerID="cri-o://60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f" gracePeriod=2 Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.374142 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.465834 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities\") pod \"0960700b-72ad-4f99-8af6-20f3436a38a9\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.466115 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content\") pod \"0960700b-72ad-4f99-8af6-20f3436a38a9\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.466154 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9x6c\" (UniqueName: \"kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c\") pod \"0960700b-72ad-4f99-8af6-20f3436a38a9\" (UID: \"0960700b-72ad-4f99-8af6-20f3436a38a9\") " Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.466837 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities" (OuterVolumeSpecName: "utilities") pod "0960700b-72ad-4f99-8af6-20f3436a38a9" (UID: "0960700b-72ad-4f99-8af6-20f3436a38a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.472116 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c" (OuterVolumeSpecName: "kube-api-access-r9x6c") pod "0960700b-72ad-4f99-8af6-20f3436a38a9" (UID: "0960700b-72ad-4f99-8af6-20f3436a38a9"). InnerVolumeSpecName "kube-api-access-r9x6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.500739 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0960700b-72ad-4f99-8af6-20f3436a38a9" (UID: "0960700b-72ad-4f99-8af6-20f3436a38a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.578819 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.578868 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9x6c\" (UniqueName: \"kubernetes.io/projected/0960700b-72ad-4f99-8af6-20f3436a38a9-kube-api-access-r9x6c\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.578897 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0960700b-72ad-4f99-8af6-20f3436a38a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.875528 4909 generic.go:334] "Generic (PLEG): container finished" podID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerID="60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f" exitCode=0 Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.875586 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerDied","Data":"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f"} Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.876783 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qspl" event={"ID":"0960700b-72ad-4f99-8af6-20f3436a38a9","Type":"ContainerDied","Data":"4ec58f19fb6d567c5b9d3ddef1c42787607c2e8b9acd3b6018e0de1c334ed390"} Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.875632 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qspl" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.876931 4909 scope.go:117] "RemoveContainer" containerID="60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.922541 4909 scope.go:117] "RemoveContainer" containerID="5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64" Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.940258 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.940323 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qspl"] Nov 28 18:21:01 crc kubenswrapper[4909]: I1128 18:21:01.968391 4909 scope.go:117] "RemoveContainer" containerID="301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.022321 4909 scope.go:117] "RemoveContainer" containerID="60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f" Nov 28 18:21:02 crc kubenswrapper[4909]: E1128 18:21:02.022875 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f\": container with ID starting with 60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f not found: ID does not exist" containerID="60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.022949 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f"} err="failed to get container status \"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f\": rpc error: code = NotFound desc = could not find container \"60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f\": container with ID starting with 60bf8bdccbd9b86085b697fab1ce7be9d9e3b4519faae7306cb0477bbcfd3f6f not found: ID does not exist" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.022990 4909 scope.go:117] "RemoveContainer" containerID="5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64" Nov 28 18:21:02 crc kubenswrapper[4909]: E1128 18:21:02.023710 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64\": container with ID starting with 5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64 not found: ID does not exist" containerID="5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.023754 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64"} err="failed to get container status \"5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64\": rpc error: code = NotFound desc = could not find container \"5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64\": container with ID starting with 5bc724bf24bad60429ff673dca9abc5c0660f2f11d9fb77d3f0cb372bf09bc64 not found: ID does not exist" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.023782 4909 scope.go:117] "RemoveContainer" 
containerID="301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c" Nov 28 18:21:02 crc kubenswrapper[4909]: E1128 18:21:02.024171 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c\": container with ID starting with 301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c not found: ID does not exist" containerID="301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c" Nov 28 18:21:02 crc kubenswrapper[4909]: I1128 18:21:02.024202 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c"} err="failed to get container status \"301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c\": rpc error: code = NotFound desc = could not find container \"301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c\": container with ID starting with 301b65f1a5c1e045f0180260b03ca7e8221515a961f990965bec96087a4ad32c not found: ID does not exist" Nov 28 18:21:03 crc kubenswrapper[4909]: I1128 18:21:03.915378 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" path="/var/lib/kubelet/pods/0960700b-72ad-4f99-8af6-20f3436a38a9/volumes" Nov 28 18:21:08 crc kubenswrapper[4909]: I1128 18:21:08.961940 4909 generic.go:334] "Generic (PLEG): container finished" podID="38f1fa07-931f-4994-a9da-219f6464f5ca" containerID="ba5c60a23304ed5d44c37c21e18e5ad1e1d5e9ad0051809d59923097883b152e" exitCode=0 Nov 28 18:21:08 crc kubenswrapper[4909]: I1128 18:21:08.962066 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" event={"ID":"38f1fa07-931f-4994-a9da-219f6464f5ca","Type":"ContainerDied","Data":"ba5c60a23304ed5d44c37c21e18e5ad1e1d5e9ad0051809d59923097883b152e"} Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.602571 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675184 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675249 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675277 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675489 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675547 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpqpr\" (UniqueName: \"kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675619 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.675695 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph\") pod \"38f1fa07-931f-4994-a9da-219f6464f5ca\" (UID: \"38f1fa07-931f-4994-a9da-219f6464f5ca\") " Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.681818 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph" (OuterVolumeSpecName: "ceph") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.682516 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.685806 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr" (OuterVolumeSpecName: "kube-api-access-qpqpr") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "kube-api-access-qpqpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.711509 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.718275 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.724848 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.729908 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory" (OuterVolumeSpecName: "inventory") pod "38f1fa07-931f-4994-a9da-219f6464f5ca" (UID: "38f1fa07-931f-4994-a9da-219f6464f5ca"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778843 4909 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778880 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpqpr\" (UniqueName: \"kubernetes.io/projected/38f1fa07-931f-4994-a9da-219f6464f5ca-kube-api-access-qpqpr\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778891 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778902 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778912 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778925 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:10 crc kubenswrapper[4909]: I1128 18:21:10.778933 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38f1fa07-931f-4994-a9da-219f6464f5ca-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.004476 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" event={"ID":"38f1fa07-931f-4994-a9da-219f6464f5ca","Type":"ContainerDied","Data":"a4ea31d18f3352dfc099fd003d8c178be5f547abe5bf1cae93d7b21432c35bfd"} Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.004549 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4ea31d18f3352dfc099fd003d8c178be5f547abe5bf1cae93d7b21432c35bfd" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.004567 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5rpbd" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.115755 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-srn2t"] Nov 28 18:21:11 crc kubenswrapper[4909]: E1128 18:21:11.116209 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="extract-content" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116226 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="extract-content" Nov 28 18:21:11 crc kubenswrapper[4909]: E1128 18:21:11.116248 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="registry-server" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116255 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="registry-server" Nov 28 18:21:11 crc kubenswrapper[4909]: E1128 18:21:11.116284 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f1fa07-931f-4994-a9da-219f6464f5ca" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116291 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f1fa07-931f-4994-a9da-219f6464f5ca" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 18:21:11 crc kubenswrapper[4909]: E1128 18:21:11.116306 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="extract-utilities" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116313 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="extract-utilities" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116533 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="38f1fa07-931f-4994-a9da-219f6464f5ca" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.116546 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0960700b-72ad-4f99-8af6-20f3436a38a9" containerName="registry-server" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.117385 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.120101 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.120367 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.122376 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.123359 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.126411 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.137257 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-srn2t"] Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.187531 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.187582 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.187640 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.188074 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts2z9\" (UniqueName: \"kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.188131 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.188197 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph\") pod 
\"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290543 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290575 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290640 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290728 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts2z9\" (UniqueName: \"kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.290798 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.297476 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.297495 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.297678 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.297945 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.298601 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.308346 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts2z9\" (UniqueName: \"kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9\") pod \"libvirt-openstack-openstack-cell1-srn2t\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:11 crc kubenswrapper[4909]: I1128 18:21:11.436202 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:21:12 crc kubenswrapper[4909]: I1128 18:21:12.007949 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-srn2t"] Nov 28 18:21:13 crc kubenswrapper[4909]: I1128 18:21:13.028063 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" event={"ID":"de3e2413-2749-4df8-b633-30842a045c5c","Type":"ContainerStarted","Data":"fa7a08f3a9f306d73780a8cc92df76d628c4604511addfefdab7cd57038451b1"} Nov 28 18:21:13 crc kubenswrapper[4909]: I1128 18:21:13.028352 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" event={"ID":"de3e2413-2749-4df8-b633-30842a045c5c","Type":"ContainerStarted","Data":"f3813704162bf2358ff848c03a482c67d8705cda4711f2ad42cf67e1af26aaed"} Nov 28 18:21:13 crc kubenswrapper[4909]: I1128 18:21:13.056958 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" podStartSLOduration=1.432658317 podStartE2EDuration="2.056934386s" podCreationTimestamp="2025-11-28 18:21:11 +0000 UTC" firstStartedPulling="2025-11-28 18:21:12.018609883 +0000 UTC m=+7854.415294407" lastFinishedPulling="2025-11-28 18:21:12.642885912 +0000 UTC m=+7855.039570476" observedRunningTime="2025-11-28 18:21:13.046636499 +0000 UTC m=+7855.443321063" watchObservedRunningTime="2025-11-28 18:21:13.056934386 +0000 UTC m=+7855.453618930" Nov 28 18:21:49 crc kubenswrapper[4909]: I1128 18:21:49.910781 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:21:49 crc kubenswrapper[4909]: I1128 18:21:49.911439 4909 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.490821 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.493700 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.515113 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.595897 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.595941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.595975 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7n47\" (UniqueName: \"kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.698559 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7n47\" (UniqueName: \"kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.700545 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.701091 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.701176 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content\") pod \"redhat-operators-wnh4w\" (UID: 
\"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.705971 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.718790 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7n47\" (UniqueName: \"kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47\") pod \"redhat-operators-wnh4w\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:16 crc kubenswrapper[4909]: I1128 18:22:16.832788 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:17 crc kubenswrapper[4909]: I1128 18:22:17.296224 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:17 crc kubenswrapper[4909]: I1128 18:22:17.759459 4909 generic.go:334] "Generic (PLEG): container finished" podID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerID="3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623" exitCode=0 Nov 28 18:22:17 crc kubenswrapper[4909]: I1128 18:22:17.759515 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerDied","Data":"3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623"} Nov 28 18:22:17 crc kubenswrapper[4909]: I1128 18:22:17.759789 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerStarted","Data":"5f5af01254913b48505d566e25750449e2e116a66cd394fe1c5b6011f165dc71"} Nov 28 18:22:19 crc kubenswrapper[4909]: I1128 18:22:19.780803 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerStarted","Data":"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44"} Nov 28 18:22:19 crc kubenswrapper[4909]: I1128 18:22:19.911817 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:22:19 crc kubenswrapper[4909]: I1128 18:22:19.911906 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:22:22 crc kubenswrapper[4909]: I1128 18:22:22.827050 4909 generic.go:334] "Generic (PLEG): container finished" podID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerID="4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44" exitCode=0 Nov 28 18:22:22 crc kubenswrapper[4909]: I1128 18:22:22.827138 4909 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerDied","Data":"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44"} Nov 28 18:22:23 crc kubenswrapper[4909]: I1128 18:22:23.850887 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerStarted","Data":"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e"} Nov 28 18:22:23 crc kubenswrapper[4909]: I1128 18:22:23.886010 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wnh4w" podStartSLOduration=2.364288692 podStartE2EDuration="7.885949837s" podCreationTimestamp="2025-11-28 18:22:16 +0000 UTC" firstStartedPulling="2025-11-28 18:22:17.761346419 +0000 UTC m=+7920.158030943" lastFinishedPulling="2025-11-28 18:22:23.283007524 +0000 UTC m=+7925.679692088" observedRunningTime="2025-11-28 18:22:23.872867845 +0000 UTC m=+7926.269552389" watchObservedRunningTime="2025-11-28 18:22:23.885949837 +0000 UTC m=+7926.282634401" Nov 28 18:22:26 crc kubenswrapper[4909]: I1128 18:22:26.833090 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:26 crc kubenswrapper[4909]: I1128 18:22:26.833860 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:27 crc kubenswrapper[4909]: I1128 18:22:27.904994 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wnh4w" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="registry-server" probeResult="failure" output=< Nov 28 18:22:27 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 18:22:27 crc kubenswrapper[4909]: > Nov 28 18:22:36 crc kubenswrapper[4909]: I1128 18:22:36.901074 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:36 crc kubenswrapper[4909]: I1128 18:22:36.965600 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:37 crc kubenswrapper[4909]: I1128 18:22:37.149249 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.011072 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wnh4w" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="registry-server" containerID="cri-o://7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e" gracePeriod=2 Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.544579 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.620250 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7n47\" (UniqueName: \"kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47\") pod \"785f74f7-e997-4e19-b8b1-825a28034d8c\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.620569 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities\") pod \"785f74f7-e997-4e19-b8b1-825a28034d8c\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.620614 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content\") pod \"785f74f7-e997-4e19-b8b1-825a28034d8c\" (UID: \"785f74f7-e997-4e19-b8b1-825a28034d8c\") " Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.621778 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities" (OuterVolumeSpecName: "utilities") pod "785f74f7-e997-4e19-b8b1-825a28034d8c" (UID: "785f74f7-e997-4e19-b8b1-825a28034d8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.628433 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47" (OuterVolumeSpecName: "kube-api-access-h7n47") pod "785f74f7-e997-4e19-b8b1-825a28034d8c" (UID: "785f74f7-e997-4e19-b8b1-825a28034d8c"). InnerVolumeSpecName "kube-api-access-h7n47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.723908 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.723946 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7n47\" (UniqueName: \"kubernetes.io/projected/785f74f7-e997-4e19-b8b1-825a28034d8c-kube-api-access-h7n47\") on node \"crc\" DevicePath \"\"" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.742637 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "785f74f7-e997-4e19-b8b1-825a28034d8c" (UID: "785f74f7-e997-4e19-b8b1-825a28034d8c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:22:38 crc kubenswrapper[4909]: I1128 18:22:38.825580 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/785f74f7-e997-4e19-b8b1-825a28034d8c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.025224 4909 generic.go:334] "Generic (PLEG): container finished" podID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerID="7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e" exitCode=0 Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.025261 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerDied","Data":"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e"} Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.025299 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wnh4w" event={"ID":"785f74f7-e997-4e19-b8b1-825a28034d8c","Type":"ContainerDied","Data":"5f5af01254913b48505d566e25750449e2e116a66cd394fe1c5b6011f165dc71"} Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.025300 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wnh4w" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.025317 4909 scope.go:117] "RemoveContainer" containerID="7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.053700 4909 scope.go:117] "RemoveContainer" containerID="4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.076681 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.089595 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wnh4w"] Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.093252 4909 scope.go:117] "RemoveContainer" containerID="3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.150069 4909 scope.go:117] "RemoveContainer" containerID="7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e" Nov 28 18:22:39 crc kubenswrapper[4909]: E1128 18:22:39.151294 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e\": container with ID starting with 7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e not found: ID does not exist" containerID="7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.151332 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e"} err="failed to get container status \"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e\": rpc error: code = NotFound desc = could not find container \"7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e\": container with ID starting with 7d64f190a3ae31d4404f884454aab4f8dd3ca683b04f9551328ff158397ae32e not found: ID does not exist" Nov 28 18:22:39 crc 
kubenswrapper[4909]: I1128 18:22:39.151360 4909 scope.go:117] "RemoveContainer" containerID="4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44" Nov 28 18:22:39 crc kubenswrapper[4909]: E1128 18:22:39.152119 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44\": container with ID starting with 4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44 not found: ID does not exist" containerID="4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.152196 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44"} err="failed to get container status \"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44\": rpc error: code = NotFound desc = could not find container \"4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44\": container with ID starting with 4100645f4f3d673672e509faab051e06363e50cc1e82e83b3981579a78f1fb44 not found: ID does not exist" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.152238 4909 scope.go:117] "RemoveContainer" containerID="3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623" Nov 28 18:22:39 crc kubenswrapper[4909]: E1128 18:22:39.152725 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623\": container with ID starting with 3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623 not found: ID does not exist" containerID="3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.152757 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623"} err="failed to get container status \"3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623\": rpc error: code = NotFound desc = could not find container \"3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623\": container with ID starting with 3dbe8575a521833c29ec26bc74976216e9af3285fb2b4f94e89507629874e623 not found: ID does not exist" Nov 28 18:22:39 crc kubenswrapper[4909]: I1128 18:22:39.920194 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" path="/var/lib/kubelet/pods/785f74f7-e997-4e19-b8b1-825a28034d8c/volumes" Nov 28 18:22:49 crc kubenswrapper[4909]: I1128 18:22:49.911216 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:22:49 crc kubenswrapper[4909]: I1128 18:22:49.912009 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:22:49 crc kubenswrapper[4909]: I1128 18:22:49.946783 4909 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:22:49 crc kubenswrapper[4909]: I1128 18:22:49.949720 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:22:49 crc kubenswrapper[4909]: I1128 18:22:49.949882 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74" gracePeriod=600 Nov 28 18:22:50 crc kubenswrapper[4909]: I1128 18:22:50.151523 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74" exitCode=0 Nov 28 18:22:50 crc kubenswrapper[4909]: I1128 18:22:50.151622 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74"} Nov 28 18:22:50 crc kubenswrapper[4909]: I1128 18:22:50.151971 4909 scope.go:117] "RemoveContainer" containerID="16af51197304d83d362143f44527eca9e3f72c8e7894bd79e4b089fd278bdb1d" Nov 28 18:22:51 crc kubenswrapper[4909]: I1128 18:22:51.166774 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"} Nov 28 18:25:19 crc kubenswrapper[4909]: I1128 18:25:19.910955 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:25:19 crc kubenswrapper[4909]: I1128 18:25:19.911390 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:25:49 crc kubenswrapper[4909]: I1128 18:25:49.910973 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:25:49 crc kubenswrapper[4909]: I1128 18:25:49.911622 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:26:00 crc 
kubenswrapper[4909]: I1128 18:26:00.362409 4909 generic.go:334] "Generic (PLEG): container finished" podID="de3e2413-2749-4df8-b633-30842a045c5c" containerID="fa7a08f3a9f306d73780a8cc92df76d628c4604511addfefdab7cd57038451b1" exitCode=0 Nov 28 18:26:00 crc kubenswrapper[4909]: I1128 18:26:00.362504 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" event={"ID":"de3e2413-2749-4df8-b633-30842a045c5c","Type":"ContainerDied","Data":"fa7a08f3a9f306d73780a8cc92df76d628c4604511addfefdab7cd57038451b1"} Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.036992 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.142785 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts2z9\" (UniqueName: \"kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.142933 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.142977 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.143055 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.143115 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.143199 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle\") pod \"de3e2413-2749-4df8-b633-30842a045c5c\" (UID: \"de3e2413-2749-4df8-b633-30842a045c5c\") " Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.149378 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph" (OuterVolumeSpecName: "ceph") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.151895 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.155240 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9" (OuterVolumeSpecName: "kube-api-access-ts2z9") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "kube-api-access-ts2z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.175067 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.175930 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.187260 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory" (OuterVolumeSpecName: "inventory") pod "de3e2413-2749-4df8-b633-30842a045c5c" (UID: "de3e2413-2749-4df8-b633-30842a045c5c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246167 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246208 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246219 4909 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246228 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246237 4909 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3e2413-2749-4df8-b633-30842a045c5c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.246246 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts2z9\" (UniqueName: \"kubernetes.io/projected/de3e2413-2749-4df8-b633-30842a045c5c-kube-api-access-ts2z9\") on node \"crc\" DevicePath \"\"" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.387090 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" event={"ID":"de3e2413-2749-4df8-b633-30842a045c5c","Type":"ContainerDied","Data":"f3813704162bf2358ff848c03a482c67d8705cda4711f2ad42cf67e1af26aaed"} Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.387141 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3813704162bf2358ff848c03a482c67d8705cda4711f2ad42cf67e1af26aaed" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.387163 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-srn2t" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484189 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-wztp8"] Nov 28 18:26:02 crc kubenswrapper[4909]: E1128 18:26:02.484590 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="registry-server" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484610 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="registry-server" Nov 28 18:26:02 crc kubenswrapper[4909]: E1128 18:26:02.484627 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de3e2413-2749-4df8-b633-30842a045c5c" containerName="libvirt-openstack-openstack-cell1" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484636 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="de3e2413-2749-4df8-b633-30842a045c5c" containerName="libvirt-openstack-openstack-cell1" Nov 28 18:26:02 crc kubenswrapper[4909]: E1128 18:26:02.484678 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="extract-utilities" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484689 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="extract-utilities" Nov 28 18:26:02 crc kubenswrapper[4909]: E1128 18:26:02.484711 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="extract-content" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484718 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="extract-content" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.484984 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="de3e2413-2749-4df8-b633-30842a045c5c" containerName="libvirt-openstack-openstack-cell1" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.485015 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="785f74f7-e997-4e19-b8b1-825a28034d8c" containerName="registry-server" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.485968 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.487929 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.488261 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.488275 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.489469 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.489641 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.489796 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.489829 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.498043 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-wztp8"] Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.654919 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655277 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2wck\" (UniqueName: \"kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655315 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655337 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655377 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory\") pod 
\"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655478 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655516 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655702 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655784 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.655941 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.656016 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758017 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758129 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0\") pod 
\"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758194 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758221 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758258 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758316 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758355 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758431 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758472 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758551 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.758626 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2wck\" (UniqueName: \"kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.759838 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.759947 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.762061 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.762348 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.762846 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.762973 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.764840 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.769440 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.770303 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.771988 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.791716 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2wck\" (UniqueName: \"kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck\") pod \"nova-cell1-openstack-openstack-cell1-wztp8\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:02 crc kubenswrapper[4909]: I1128 18:26:02.814843 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" Nov 28 18:26:03 crc kubenswrapper[4909]: I1128 18:26:03.364997 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-wztp8"] Nov 28 18:26:03 crc kubenswrapper[4909]: I1128 18:26:03.376203 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:26:03 crc kubenswrapper[4909]: I1128 18:26:03.398414 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" event={"ID":"8561d38e-6957-4c25-9849-1e73103a9efd","Type":"ContainerStarted","Data":"73ea8f42007f64801b0b87125152a3cb25f3db4048b15226da82f679dd3210f4"} Nov 28 18:26:04 crc kubenswrapper[4909]: I1128 18:26:04.411184 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" event={"ID":"8561d38e-6957-4c25-9849-1e73103a9efd","Type":"ContainerStarted","Data":"e36e8ef5d227a283408b6587f4fcc3c545638141c3e00e9168da791080c91f24"} Nov 28 18:26:04 crc kubenswrapper[4909]: I1128 18:26:04.451256 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" podStartSLOduration=1.898928937 podStartE2EDuration="2.451235547s" podCreationTimestamp="2025-11-28 18:26:02 +0000 UTC" firstStartedPulling="2025-11-28 18:26:03.37599443 +0000 UTC m=+8145.772678954" lastFinishedPulling="2025-11-28 18:26:03.92830104 +0000 UTC m=+8146.324985564" observedRunningTime="2025-11-28 18:26:04.43833673 +0000 UTC m=+8146.835021264" watchObservedRunningTime="2025-11-28 18:26:04.451235547 +0000 UTC m=+8146.847920081" Nov 28 18:26:19 crc kubenswrapper[4909]: I1128 18:26:19.911306 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:26:19 crc kubenswrapper[4909]: I1128 18:26:19.911828 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:26:19 crc kubenswrapper[4909]: I1128 18:26:19.913751 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:26:19 crc kubenswrapper[4909]: I1128 18:26:19.914726 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:26:19 crc kubenswrapper[4909]: I1128 18:26:19.914867 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" gracePeriod=600 Nov 28 18:26:20 crc kubenswrapper[4909]: E1128 18:26:20.059065 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:26:20 crc kubenswrapper[4909]: I1128 18:26:20.580366 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" exitCode=0 Nov 28 18:26:20 crc kubenswrapper[4909]: I1128 18:26:20.580474 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"} Nov 28 18:26:20 crc kubenswrapper[4909]: I1128 18:26:20.580818 4909 scope.go:117] "RemoveContainer" containerID="e9bed4a6bcd109c2766aab7a7fb38f137b95e53ec473471aec321e9fdd1bfb74" Nov 28 18:26:20 crc kubenswrapper[4909]: I1128 18:26:20.583080 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:26:20 crc kubenswrapper[4909]: E1128 18:26:20.585260 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" 
podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:26:34 crc kubenswrapper[4909]: I1128 18:26:34.901559 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:26:34 crc kubenswrapper[4909]: E1128 18:26:34.902401 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:26:45 crc kubenswrapper[4909]: I1128 18:26:45.901545 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:26:45 crc kubenswrapper[4909]: E1128 18:26:45.902620 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:26:59 crc kubenswrapper[4909]: I1128 18:26:59.902623 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:26:59 crc kubenswrapper[4909]: E1128 18:26:59.903424 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:27:11 crc kubenswrapper[4909]: I1128 18:27:11.901756 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:27:11 crc kubenswrapper[4909]: E1128 18:27:11.902757 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:27:25 crc kubenswrapper[4909]: I1128 18:27:25.903702 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:27:25 crc kubenswrapper[4909]: E1128 18:27:25.904393 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:27:40 crc kubenswrapper[4909]: I1128 18:27:40.914937 4909 scope.go:117] "RemoveContainer" 
containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:27:40 crc kubenswrapper[4909]: E1128 18:27:40.916325 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:27:55 crc kubenswrapper[4909]: I1128 18:27:55.901855 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:27:55 crc kubenswrapper[4909]: E1128 18:27:55.902724 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:28:10 crc kubenswrapper[4909]: I1128 18:28:10.903260 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:28:10 crc kubenswrapper[4909]: E1128 18:28:10.905134 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.175474 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-55btq"] Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.182189 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.193367 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-55btq"] Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.233792 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.233986 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9rbq\" (UniqueName: \"kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.234169 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.335462 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.335533 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.335617 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9rbq\" (UniqueName: \"kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.336181 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.336226 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.354722 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d9rbq\" (UniqueName: \"kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq\") pod \"community-operators-55btq\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") " pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:15 crc kubenswrapper[4909]: I1128 18:28:15.522729 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-55btq" Nov 28 18:28:16 crc kubenswrapper[4909]: I1128 18:28:16.088100 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-55btq"] Nov 28 18:28:16 crc kubenswrapper[4909]: I1128 18:28:16.878136 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerID="baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce" exitCode=0 Nov 28 18:28:16 crc kubenswrapper[4909]: I1128 18:28:16.878213 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerDied","Data":"baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce"} Nov 28 18:28:16 crc kubenswrapper[4909]: I1128 18:28:16.879648 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerStarted","Data":"0c2740a175478126433f8cd18d41ceb6ecbeb7de289f94100814bcfd2272ce36"} Nov 28 18:28:18 crc kubenswrapper[4909]: I1128 18:28:18.902332 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerID="8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4" exitCode=0 Nov 28 18:28:18 crc kubenswrapper[4909]: I1128 18:28:18.902456 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerDied","Data":"8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4"} Nov 28 18:28:19 crc kubenswrapper[4909]: I1128 18:28:19.921039 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerStarted","Data":"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"} Nov 28 18:28:19 crc kubenswrapper[4909]: I1128 18:28:19.952448 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-55btq" podStartSLOduration=2.439264419 podStartE2EDuration="4.952422176s" podCreationTimestamp="2025-11-28 18:28:15 +0000 UTC" firstStartedPulling="2025-11-28 18:28:16.880359033 +0000 UTC m=+8279.277043567" lastFinishedPulling="2025-11-28 18:28:19.3935168 +0000 UTC m=+8281.790201324" observedRunningTime="2025-11-28 18:28:19.947169084 +0000 UTC m=+8282.343853678" watchObservedRunningTime="2025-11-28 18:28:19.952422176 +0000 UTC m=+8282.349106740" Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.551993 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"] Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.557305 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.562964 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.563124 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7blbl\" (UniqueName: \"kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.563822 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.585829 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"]
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.672482 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.672578 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7blbl\" (UniqueName: \"kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.672821 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.673261 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.673290 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.710813 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7blbl\" (UniqueName: \"kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl\") pod \"certified-operators-r5hbl\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") " pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:21 crc kubenswrapper[4909]: I1128 18:28:21.922858 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:22 crc kubenswrapper[4909]: I1128 18:28:22.544547 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"]
Nov 28 18:28:22 crc kubenswrapper[4909]: I1128 18:28:22.955697 4909 generic.go:334] "Generic (PLEG): container finished" podID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerID="bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75" exitCode=0
Nov 28 18:28:22 crc kubenswrapper[4909]: I1128 18:28:22.955777 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerDied","Data":"bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75"}
Nov 28 18:28:22 crc kubenswrapper[4909]: I1128 18:28:22.956082 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerStarted","Data":"59f2243472c2cc8fd3bfb1a7843308dc0a7c00b8269976ca4d42bae4647e11ba"}
Nov 28 18:28:25 crc kubenswrapper[4909]: I1128 18:28:25.416970 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerStarted","Data":"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"}
Nov 28 18:28:25 crc kubenswrapper[4909]: I1128 18:28:25.523168 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:25 crc kubenswrapper[4909]: I1128 18:28:25.523647 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:25 crc kubenswrapper[4909]: I1128 18:28:25.589277 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:25 crc kubenswrapper[4909]: I1128 18:28:25.903173 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:28:25 crc kubenswrapper[4909]: E1128 18:28:25.903482 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:28:26 crc kubenswrapper[4909]: I1128 18:28:26.472712 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:26 crc kubenswrapper[4909]: I1128 18:28:26.736428 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-55btq"]
Nov 28 18:28:27 crc kubenswrapper[4909]: I1128 18:28:27.443383 4909 generic.go:334] "Generic (PLEG): container finished" podID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerID="1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6" exitCode=0
Nov 28 18:28:27 crc kubenswrapper[4909]: I1128 18:28:27.444030 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerDied","Data":"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"}
Nov 28 18:28:28 crc kubenswrapper[4909]: I1128 18:28:28.460139 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-55btq" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="registry-server" containerID="cri-o://01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d" gracePeriod=2
Nov 28 18:28:28 crc kubenswrapper[4909]: I1128 18:28:28.460596 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerStarted","Data":"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"}
Nov 28 18:28:28 crc kubenswrapper[4909]: I1128 18:28:28.495906 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r5hbl" podStartSLOduration=2.459400547 podStartE2EDuration="7.495889572s" podCreationTimestamp="2025-11-28 18:28:21 +0000 UTC" firstStartedPulling="2025-11-28 18:28:22.957906226 +0000 UTC m=+8285.354590780" lastFinishedPulling="2025-11-28 18:28:27.994395281 +0000 UTC m=+8290.391079805" observedRunningTime="2025-11-28 18:28:28.490134907 +0000 UTC m=+8290.886819451" watchObservedRunningTime="2025-11-28 18:28:28.495889572 +0000 UTC m=+8290.892574096"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.042723 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.189999 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content\") pod \"a3b61472-81d0-41b6-beb8-5c30abb25d44\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") "
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.190144 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities\") pod \"a3b61472-81d0-41b6-beb8-5c30abb25d44\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") "
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.190296 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9rbq\" (UniqueName: \"kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq\") pod \"a3b61472-81d0-41b6-beb8-5c30abb25d44\" (UID: \"a3b61472-81d0-41b6-beb8-5c30abb25d44\") "
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.190702 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities" (OuterVolumeSpecName: "utilities") pod "a3b61472-81d0-41b6-beb8-5c30abb25d44" (UID: "a3b61472-81d0-41b6-beb8-5c30abb25d44"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.191042 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.198821 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq" (OuterVolumeSpecName: "kube-api-access-d9rbq") pod "a3b61472-81d0-41b6-beb8-5c30abb25d44" (UID: "a3b61472-81d0-41b6-beb8-5c30abb25d44"). InnerVolumeSpecName "kube-api-access-d9rbq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.248323 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3b61472-81d0-41b6-beb8-5c30abb25d44" (UID: "a3b61472-81d0-41b6-beb8-5c30abb25d44"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.293871 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9rbq\" (UniqueName: \"kubernetes.io/projected/a3b61472-81d0-41b6-beb8-5c30abb25d44-kube-api-access-d9rbq\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.294401 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b61472-81d0-41b6-beb8-5c30abb25d44-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.470880 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerID="01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d" exitCode=0
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.470969 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-55btq"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.472649 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerDied","Data":"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"}
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.472985 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-55btq" event={"ID":"a3b61472-81d0-41b6-beb8-5c30abb25d44","Type":"ContainerDied","Data":"0c2740a175478126433f8cd18d41ceb6ecbeb7de289f94100814bcfd2272ce36"}
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.473067 4909 scope.go:117] "RemoveContainer" containerID="01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.515749 4909 scope.go:117] "RemoveContainer" containerID="8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.517080 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-55btq"]
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.529911 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-55btq"]
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.537406 4909 scope.go:117] "RemoveContainer" containerID="baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.622828 4909 scope.go:117] "RemoveContainer" containerID="01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"
Nov 28 18:28:29 crc kubenswrapper[4909]: E1128 18:28:29.626284 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d\": container with ID starting with 01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d not found: ID does not exist" containerID="01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.626343 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d"} err="failed to get container status \"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d\": rpc error: code = NotFound desc = could not find container \"01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d\": container with ID starting with 01258e433e15ead20f58e38dae99bac92ccaadedf50577f339d90660e238803d not found: ID does not exist"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.626375 4909 scope.go:117] "RemoveContainer" containerID="8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4"
Nov 28 18:28:29 crc kubenswrapper[4909]: E1128 18:28:29.626799 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4\": container with ID starting with 8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4 not found: ID does not exist" containerID="8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.626867 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4"} err="failed to get container status \"8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4\": rpc error: code = NotFound desc = could not find container \"8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4\": container with ID starting with 8334af6031954b8ba13a54fd786b71ecada769db527ff43852ae3bd01ff40ec4 not found: ID does not exist"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.626892 4909 scope.go:117] "RemoveContainer" containerID="baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce"
Nov 28 18:28:29 crc kubenswrapper[4909]: E1128 18:28:29.631941 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce\": container with ID starting with baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce not found: ID does not exist" containerID="baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.631986 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce"} err="failed to get container status \"baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce\": rpc error: code = NotFound desc = could not find container \"baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce\": container with ID starting with baeeee39e446004a4fa5d2cfb695a32cbd093ca423eed95c74e0070af82733ce not found: ID does not exist"
Nov 28 18:28:29 crc kubenswrapper[4909]: I1128 18:28:29.920022 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" path="/var/lib/kubelet/pods/a3b61472-81d0-41b6-beb8-5c30abb25d44/volumes"
Nov 28 18:28:31 crc kubenswrapper[4909]: I1128 18:28:31.923411 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:31 crc kubenswrapper[4909]: I1128 18:28:31.923925 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:31 crc kubenswrapper[4909]: I1128 18:28:31.993175 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:36 crc kubenswrapper[4909]: I1128 18:28:36.901405 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:28:36 crc kubenswrapper[4909]: E1128 18:28:36.902287 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:28:41 crc kubenswrapper[4909]: I1128 18:28:41.983240 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:42 crc kubenswrapper[4909]: I1128 18:28:42.042187 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"]
Nov 28 18:28:42 crc kubenswrapper[4909]: I1128 18:28:42.634148 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r5hbl" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="registry-server" containerID="cri-o://4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389" gracePeriod=2
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.211198 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.311727 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities\") pod \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") "
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.311842 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content\") pod \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") "
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.312027 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7blbl\" (UniqueName: \"kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl\") pod \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\" (UID: \"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205\") "
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.313768 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities" (OuterVolumeSpecName: "utilities") pod "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" (UID: "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.318066 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl" (OuterVolumeSpecName: "kube-api-access-7blbl") pod "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" (UID: "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205"). InnerVolumeSpecName "kube-api-access-7blbl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.360276 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" (UID: "eeead2fe-143d-4bd3-a09e-3a4c8c4f7205"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.414027 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7blbl\" (UniqueName: \"kubernetes.io/projected/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-kube-api-access-7blbl\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.414059 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.414072 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.645626 4909 generic.go:334] "Generic (PLEG): container finished" podID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerID="4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389" exitCode=0
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.645745 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5hbl"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.645764 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerDied","Data":"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"}
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.646478 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5hbl" event={"ID":"eeead2fe-143d-4bd3-a09e-3a4c8c4f7205","Type":"ContainerDied","Data":"59f2243472c2cc8fd3bfb1a7843308dc0a7c00b8269976ca4d42bae4647e11ba"}
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.646565 4909 scope.go:117] "RemoveContainer" containerID="4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.687563 4909 scope.go:117] "RemoveContainer" containerID="1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.690937 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"]
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.700945 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r5hbl"]
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.732266 4909 scope.go:117] "RemoveContainer" containerID="bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.797208 4909 scope.go:117] "RemoveContainer" containerID="4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"
Nov 28 18:28:43 crc kubenswrapper[4909]: E1128 18:28:43.797846 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389\": container with ID starting with 4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389 not found: ID does not exist" containerID="4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.797908 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389"} err="failed to get container status \"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389\": rpc error: code = NotFound desc = could not find container \"4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389\": container with ID starting with 4a8f1941ac3c70142e595091cd7f14f0444d1f5ef2d690ecf1181f528e983389 not found: ID does not exist"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.797940 4909 scope.go:117] "RemoveContainer" containerID="1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"
Nov 28 18:28:43 crc kubenswrapper[4909]: E1128 18:28:43.798285 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6\": container with ID starting with 1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6 not found: ID does not exist" containerID="1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.798395 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6"} err="failed to get container status \"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6\": rpc error: code = NotFound desc = could not find container \"1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6\": container with ID starting with 1c356cea28eafc739f0c0778a339771ad687016bd5d4b82cf6e21806b32da7a6 not found: ID does not exist"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.798438 4909 scope.go:117] "RemoveContainer" containerID="bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75"
Nov 28 18:28:43 crc kubenswrapper[4909]: E1128 18:28:43.798779 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75\": container with ID starting with bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75 not found: ID does not exist" containerID="bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.798847 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75"} err="failed to get container status \"bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75\": rpc error: code = NotFound desc = could not find container \"bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75\": container with ID starting with bd09e76ea45e0a610a06a6e51653b48065db329cf3e359abf407f5eb0abcaf75 not found: ID does not exist"
Nov 28 18:28:43 crc kubenswrapper[4909]: I1128 18:28:43.919319 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" path="/var/lib/kubelet/pods/eeead2fe-143d-4bd3-a09e-3a4c8c4f7205/volumes"
Nov 28 18:28:49 crc kubenswrapper[4909]: I1128 18:28:49.902600 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:28:49 crc kubenswrapper[4909]: E1128 18:28:49.903460 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:29:02 crc kubenswrapper[4909]: I1128 18:29:02.901829 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:29:02 crc kubenswrapper[4909]: E1128 18:29:02.902605 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:29:17 crc kubenswrapper[4909]: I1128 18:29:17.911175 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:29:17 crc kubenswrapper[4909]: E1128 18:29:17.911928 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:29:27 crc kubenswrapper[4909]: I1128 18:29:27.207132 4909 generic.go:334] "Generic (PLEG): container finished" podID="8561d38e-6957-4c25-9849-1e73103a9efd" containerID="e36e8ef5d227a283408b6587f4fcc3c545638141c3e00e9168da791080c91f24" exitCode=0
Nov 28 18:29:27 crc kubenswrapper[4909]: I1128 18:29:27.207739 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" event={"ID":"8561d38e-6957-4c25-9849-1e73103a9efd","Type":"ContainerDied","Data":"e36e8ef5d227a283408b6587f4fcc3c545638141c3e00e9168da791080c91f24"}
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.750750 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8"
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.949288 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.950545 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2wck\" (UniqueName: \"kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.950609 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.950737 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.950901 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.950972 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.951024 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.951105 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.951169 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.951295 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.951338 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1\") pod \"8561d38e-6957-4c25-9849-1e73103a9efd\" (UID: \"8561d38e-6957-4c25-9849-1e73103a9efd\") "
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.956187 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.958028 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck" (OuterVolumeSpecName: "kube-api-access-d2wck") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "kube-api-access-d2wck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.960098 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph" (OuterVolumeSpecName: "ceph") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.980148 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.982632 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.986800 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.989575 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:28 crc kubenswrapper[4909]: I1128 18:29:28.990173 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.010028 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.010522 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory" (OuterVolumeSpecName: "inventory") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.012421 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "8561d38e-6957-4c25-9849-1e73103a9efd" (UID: "8561d38e-6957-4c25-9849-1e73103a9efd"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058260 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058293 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058307 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058316 4909 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058324 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058333 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2wck\" (UniqueName: \"kubernetes.io/projected/8561d38e-6957-4c25-9849-1e73103a9efd-kube-api-access-d2wck\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058341 4909 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058349 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058359 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058368 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.058376 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8561d38e-6957-4c25-9849-1e73103a9efd-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.246711 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8" event={"ID":"8561d38e-6957-4c25-9849-1e73103a9efd","Type":"ContainerDied","Data":"73ea8f42007f64801b0b87125152a3cb25f3db4048b15226da82f679dd3210f4"}
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.246773 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73ea8f42007f64801b0b87125152a3cb25f3db4048b15226da82f679dd3210f4"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.246788 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-wztp8"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.352836 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-dmmb6"]
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353403 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353432 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353452 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353462 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353473 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8561d38e-6957-4c25-9849-1e73103a9efd" containerName="nova-cell1-openstack-openstack-cell1"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353483 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="8561d38e-6957-4c25-9849-1e73103a9efd" containerName="nova-cell1-openstack-openstack-cell1"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353494 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="extract-content"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353501 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="extract-content"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353514 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="extract-utilities"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353522 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="extract-utilities"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353541 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="extract-content"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353548 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="extract-content"
Nov 28 18:29:29 crc kubenswrapper[4909]: E1128 18:29:29.353580 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="extract-utilities"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353588 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="extract-utilities"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353849 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="8561d38e-6957-4c25-9849-1e73103a9efd" containerName="nova-cell1-openstack-openstack-cell1"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353887 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeead2fe-143d-4bd3-a09e-3a4c8c4f7205" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.353907 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3b61472-81d0-41b6-beb8-5c30abb25d44" containerName="registry-server"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.354876 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.359470 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.359754 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.359832 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.359958 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.360028 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364113 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364177 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364292 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364405 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364471 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364506 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364618 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zrb7\" (UniqueName: \"kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.364785 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.389639 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-dmmb6"]
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.465510 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.465625 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.465685 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.465707 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.465768 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.466356 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.466385 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.466428 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zrb7\" (UniqueName: \"kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.470456 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.470491 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.475278 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.475384 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.475634 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.476039 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.477059 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.484376 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zrb7\" (UniqueName: \"kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7\") pod \"telemetry-openstack-openstack-cell1-dmmb6\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:29 crc kubenswrapper[4909]: I1128 18:29:29.687612 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6"
Nov 28 18:29:30 crc kubenswrapper[4909]: I1128 18:29:30.359302 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-dmmb6"]
Nov 28 18:29:31 crc kubenswrapper[4909]: I1128 18:29:31.268275 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" event={"ID":"0384aec0-25b2-49fd-b0d8-9a426a60005c","Type":"ContainerStarted","Data":"1c9b886825f59fb091b6b414ef4bfd0541c4abd633b7606e9c49f7db17fa71a5"}
Nov 28 18:29:31 crc kubenswrapper[4909]: I1128 18:29:31.268628 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" event={"ID":"0384aec0-25b2-49fd-b0d8-9a426a60005c","Type":"ContainerStarted","Data":"a391fda9268c46a9be67f46d860f1e6fe23b7d4fabd2ddf8be25892b8d9b6139"}
Nov 28 18:29:31 crc kubenswrapper[4909]: I1128 18:29:31.299275 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" podStartSLOduration=1.874160715 podStartE2EDuration="2.299249805s" podCreationTimestamp="2025-11-28 18:29:29 +0000 UTC" firstStartedPulling="2025-11-28 18:29:30.382808612 +0000 UTC m=+8352.779493136" lastFinishedPulling="2025-11-28 18:29:30.807897682 +0000 UTC m=+8353.204582226" observedRunningTime="2025-11-28 18:29:31.284515379 +0000 UTC m=+8353.681199913" watchObservedRunningTime="2025-11-28 18:29:31.299249805 +0000 UTC m=+8353.695934329"
Nov 28 18:29:31 crc kubenswrapper[4909]: I1128 18:29:31.906509 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:29:31 crc kubenswrapper[4909]: E1128 18:29:31.907255 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:29:46 crc kubenswrapper[4909]: I1128 18:29:46.901502 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:29:46 crc kubenswrapper[4909]: E1128 18:29:46.902232 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:29:58 crc kubenswrapper[4909]: I1128 18:29:58.902571 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7"
Nov 28 18:29:58 crc kubenswrapper[4909]: E1128 18:29:58.903589 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.153109 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"]
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.155368 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.160192 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.160423 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.165302 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"]
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.215963 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vqtb\" (UniqueName: \"kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.216192 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.216482 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.319227 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.319441 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.319575 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vqtb\" (UniqueName: \"kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.320612 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.325214 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.336009 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vqtb\" (UniqueName: \"kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb\") pod \"collect-profiles-29405910-7x7f9\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.500725 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"
Nov 28 18:30:00 crc kubenswrapper[4909]: I1128 18:30:00.994494 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9"]
Nov 28 18:30:01 crc kubenswrapper[4909]: I1128 18:30:01.630452 4909 generic.go:334] "Generic (PLEG): container finished" podID="a71cbeca-e50d-421a-a45c-15a4a6e55665" containerID="c1078b9a60894c0b2a4ebb1bc1b871cfe3225cbbc39b65fa702242270fbcea60" exitCode=0
Nov 28 18:30:01 crc kubenswrapper[4909]: I1128 18:30:01.630510 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9" event={"ID":"a71cbeca-e50d-421a-a45c-15a4a6e55665","Type":"ContainerDied","Data":"c1078b9a60894c0b2a4ebb1bc1b871cfe3225cbbc39b65fa702242270fbcea60"}
Nov 28 18:30:01 crc kubenswrapper[4909]: I1128 18:30:01.630542 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9" event={"ID":"a71cbeca-e50d-421a-a45c-15a4a6e55665","Type":"ContainerStarted","Data":"61dbb1198b29f195c4f26cecb72f7ac5bba85c8c13b534226b9169446272dcf9"}
Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.046886 4909 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.083772 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume\") pod \"a71cbeca-e50d-421a-a45c-15a4a6e55665\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.083946 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume\") pod \"a71cbeca-e50d-421a-a45c-15a4a6e55665\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.084142 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vqtb\" (UniqueName: \"kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb\") pod \"a71cbeca-e50d-421a-a45c-15a4a6e55665\" (UID: \"a71cbeca-e50d-421a-a45c-15a4a6e55665\") " Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.086834 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume" (OuterVolumeSpecName: "config-volume") pod "a71cbeca-e50d-421a-a45c-15a4a6e55665" (UID: "a71cbeca-e50d-421a-a45c-15a4a6e55665"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.093752 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb" (OuterVolumeSpecName: "kube-api-access-4vqtb") pod "a71cbeca-e50d-421a-a45c-15a4a6e55665" (UID: "a71cbeca-e50d-421a-a45c-15a4a6e55665"). InnerVolumeSpecName "kube-api-access-4vqtb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.096326 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a71cbeca-e50d-421a-a45c-15a4a6e55665" (UID: "a71cbeca-e50d-421a-a45c-15a4a6e55665"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.187058 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vqtb\" (UniqueName: \"kubernetes.io/projected/a71cbeca-e50d-421a-a45c-15a4a6e55665-kube-api-access-4vqtb\") on node \"crc\" DevicePath \"\"" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.187096 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a71cbeca-e50d-421a-a45c-15a4a6e55665-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.187106 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a71cbeca-e50d-421a-a45c-15a4a6e55665-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.669800 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9" event={"ID":"a71cbeca-e50d-421a-a45c-15a4a6e55665","Type":"ContainerDied","Data":"61dbb1198b29f195c4f26cecb72f7ac5bba85c8c13b534226b9169446272dcf9"} Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.669853 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61dbb1198b29f195c4f26cecb72f7ac5bba85c8c13b534226b9169446272dcf9" Nov 28 18:30:03 crc kubenswrapper[4909]: I1128 18:30:03.669942 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405910-7x7f9" Nov 28 18:30:04 crc kubenswrapper[4909]: I1128 18:30:04.158915 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6"] Nov 28 18:30:04 crc kubenswrapper[4909]: I1128 18:30:04.174542 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-v8wr6"] Nov 28 18:30:05 crc kubenswrapper[4909]: I1128 18:30:05.914029 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40bf5f05-5059-4119-aba9-8b46f5ffd953" path="/var/lib/kubelet/pods/40bf5f05-5059-4119-aba9-8b46f5ffd953/volumes" Nov 28 18:30:12 crc kubenswrapper[4909]: I1128 18:30:12.902248 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:30:12 crc kubenswrapper[4909]: E1128 18:30:12.902982 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:30:25 crc kubenswrapper[4909]: I1128 18:30:25.901754 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:30:25 crc kubenswrapper[4909]: E1128 18:30:25.902578 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:30:38 crc kubenswrapper[4909]: I1128 18:30:38.740313 4909 scope.go:117] "RemoveContainer" containerID="b6cafabfb3fd093c06d23f9ae6815454ffa8d52c69392dcc9a2bfafe87ebd975" Nov 28 18:30:40 crc kubenswrapper[4909]: I1128 18:30:40.902899 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:30:40 crc kubenswrapper[4909]: E1128 18:30:40.903462 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:30:55 crc kubenswrapper[4909]: I1128 18:30:55.902204 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:30:55 crc kubenswrapper[4909]: E1128 18:30:55.903534 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:31:08 crc kubenswrapper[4909]: I1128 18:31:08.901960 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:31:08 crc kubenswrapper[4909]: E1128 18:31:08.902817 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.126961 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:18 crc kubenswrapper[4909]: E1128 18:31:18.128767 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71cbeca-e50d-421a-a45c-15a4a6e55665" containerName="collect-profiles" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.128801 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71cbeca-e50d-421a-a45c-15a4a6e55665" containerName="collect-profiles" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.129353 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a71cbeca-e50d-421a-a45c-15a4a6e55665" containerName="collect-profiles" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.133358 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.148174 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.328354 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.328681 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.328787 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnbkk\" (UniqueName: \"kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.430743 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnbkk\" (UniqueName: \"kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.430942 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.430999 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.431558 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.431631 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.466524 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wnbkk\" (UniqueName: \"kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk\") pod \"redhat-marketplace-dw925\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.486090 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:18 crc kubenswrapper[4909]: I1128 18:31:18.976622 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:19 crc kubenswrapper[4909]: I1128 18:31:19.524040 4909 generic.go:334] "Generic (PLEG): container finished" podID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerID="469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7" exitCode=0 Nov 28 18:31:19 crc kubenswrapper[4909]: I1128 18:31:19.524190 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerDied","Data":"469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7"} Nov 28 18:31:19 crc kubenswrapper[4909]: I1128 18:31:19.524363 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerStarted","Data":"8c16765f811695900a9b24b9b9da3c4ab324b5e7f91faf043d9844ecaa64a3c2"} Nov 28 18:31:19 crc kubenswrapper[4909]: I1128 18:31:19.526238 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:31:19 crc kubenswrapper[4909]: I1128 18:31:19.901492 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:31:19 crc kubenswrapper[4909]: E1128 18:31:19.901846 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:31:21 crc kubenswrapper[4909]: I1128 18:31:21.550003 4909 generic.go:334] "Generic (PLEG): container finished" podID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerID="7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542" exitCode=0 Nov 28 18:31:21 crc kubenswrapper[4909]: I1128 18:31:21.550095 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerDied","Data":"7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542"} Nov 28 18:31:22 crc kubenswrapper[4909]: I1128 18:31:22.561844 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerStarted","Data":"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e"} Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.486244 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.486698 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.562967 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.584687 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dw925" podStartSLOduration=7.989436416 podStartE2EDuration="10.584644304s" podCreationTimestamp="2025-11-28 18:31:18 +0000 UTC" firstStartedPulling="2025-11-28 18:31:19.526026194 +0000 UTC m=+8461.922710718" lastFinishedPulling="2025-11-28 18:31:22.121234082 +0000 UTC m=+8464.517918606" observedRunningTime="2025-11-28 18:31:22.588521867 +0000 UTC m=+8464.985206431" watchObservedRunningTime="2025-11-28 18:31:28.584644304 +0000 UTC m=+8470.981328828" Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.673101 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:28 crc kubenswrapper[4909]: I1128 18:31:28.801583 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:30 crc kubenswrapper[4909]: I1128 18:31:30.644266 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dw925" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="registry-server" containerID="cri-o://d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e" gracePeriod=2 Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.155451 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.322181 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnbkk\" (UniqueName: \"kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk\") pod \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.322350 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content\") pod \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.322545 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities\") pod \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\" (UID: \"91335ed9-48f0-4254-b2ad-faa1cf438b4e\") " Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.324226 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities" (OuterVolumeSpecName: "utilities") pod "91335ed9-48f0-4254-b2ad-faa1cf438b4e" (UID: "91335ed9-48f0-4254-b2ad-faa1cf438b4e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.333370 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk" (OuterVolumeSpecName: "kube-api-access-wnbkk") pod "91335ed9-48f0-4254-b2ad-faa1cf438b4e" (UID: "91335ed9-48f0-4254-b2ad-faa1cf438b4e"). InnerVolumeSpecName "kube-api-access-wnbkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.342088 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91335ed9-48f0-4254-b2ad-faa1cf438b4e" (UID: "91335ed9-48f0-4254-b2ad-faa1cf438b4e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.424925 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnbkk\" (UniqueName: \"kubernetes.io/projected/91335ed9-48f0-4254-b2ad-faa1cf438b4e-kube-api-access-wnbkk\") on node \"crc\" DevicePath \"\"" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.425249 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.425259 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91335ed9-48f0-4254-b2ad-faa1cf438b4e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.654817 4909 generic.go:334] "Generic (PLEG): container finished" podID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerID="d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e" exitCode=0 Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.654864 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerDied","Data":"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e"} Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.654895 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dw925" event={"ID":"91335ed9-48f0-4254-b2ad-faa1cf438b4e","Type":"ContainerDied","Data":"8c16765f811695900a9b24b9b9da3c4ab324b5e7f91faf043d9844ecaa64a3c2"} Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.654916 4909 scope.go:117] "RemoveContainer" containerID="d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.655020 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dw925" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.710889 4909 scope.go:117] "RemoveContainer" containerID="7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.715752 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.727564 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dw925"] Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.739459 4909 scope.go:117] "RemoveContainer" containerID="469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.843707 4909 scope.go:117] "RemoveContainer" containerID="d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e" Nov 28 18:31:31 crc kubenswrapper[4909]: E1128 18:31:31.851341 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e\": container with ID starting with d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e not found: ID does not exist" containerID="d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.851569 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e"} err="failed to get container status \"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e\": rpc error: code = NotFound desc = could not find container \"d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e\": container with ID starting with d7af811216b9638af976c3906997db0fb98daca4b4f94edb74d1a92d3c7b951e not found: ID does not exist" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.851694 4909 scope.go:117] "RemoveContainer" containerID="7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542" Nov 28 18:31:31 crc kubenswrapper[4909]: E1128 18:31:31.855882 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542\": container with ID starting with 7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542 not found: ID does not exist" containerID="7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.856029 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542"} err="failed to get container status \"7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542\": rpc error: code = NotFound desc = could not find container \"7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542\": container with ID starting with 7797d17527be7e04f08fca291469fd634d9cb1f660d86ba15e2aa6d0ca994542 not found: ID does not exist" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.856119 4909 scope.go:117] "RemoveContainer" containerID="469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7" Nov 28 18:31:31 crc kubenswrapper[4909]: E1128 18:31:31.857084 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7\": container with ID starting with 469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7 not found: ID does not exist" containerID="469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.857189 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7"} err="failed to get container status \"469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7\": rpc error: code = NotFound desc = could not find container \"469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7\": container with ID starting with 469c4d8b85956da1a1f4411aba62be002788b2a5ec580d6111ee37e45346b1d7 not found: ID does not exist" Nov 28 18:31:31 crc kubenswrapper[4909]: I1128 18:31:31.915007 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" path="/var/lib/kubelet/pods/91335ed9-48f0-4254-b2ad-faa1cf438b4e/volumes" Nov 28 18:31:34 crc kubenswrapper[4909]: I1128 18:31:34.903884 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:31:35 crc kubenswrapper[4909]: I1128 18:31:35.713382 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d"} Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.938480 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:16 crc kubenswrapper[4909]: E1128 18:32:16.939763 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="registry-server" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.939784 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="registry-server" Nov 28 18:32:16 crc kubenswrapper[4909]: E1128 18:32:16.939823 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="extract-content" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.939835 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="extract-content" Nov 28 18:32:16 crc kubenswrapper[4909]: E1128 18:32:16.939890 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="extract-utilities" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.939906 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="extract-utilities" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.940302 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="91335ed9-48f0-4254-b2ad-faa1cf438b4e" containerName="registry-server" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.944829 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:16 crc kubenswrapper[4909]: I1128 18:32:16.979221 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.037938 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c49lt\" (UniqueName: \"kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.038181 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.038280 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.139962 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.140777 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.141035 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c49lt\" (UniqueName: \"kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.141263 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.141767 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.168253 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-c49lt\" (UniqueName: \"kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt\") pod \"redhat-operators-snhgh\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.267032 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:17 crc kubenswrapper[4909]: I1128 18:32:17.750539 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:18 crc kubenswrapper[4909]: I1128 18:32:18.279351 4909 generic.go:334] "Generic (PLEG): container finished" podID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerID="c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8" exitCode=0 Nov 28 18:32:18 crc kubenswrapper[4909]: I1128 18:32:18.279408 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerDied","Data":"c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8"} Nov 28 18:32:18 crc kubenswrapper[4909]: I1128 18:32:18.279763 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerStarted","Data":"b425f80c6c8a858ce5a3f664ca6af76750f430a92fe7887c1555429dc0b9f5b3"} Nov 28 18:32:19 crc kubenswrapper[4909]: I1128 18:32:19.297739 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerStarted","Data":"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b"} Nov 28 18:32:22 crc kubenswrapper[4909]: I1128 18:32:22.332028 4909 generic.go:334] "Generic (PLEG): container finished" podID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerID="1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b" exitCode=0 Nov 28 18:32:22 crc kubenswrapper[4909]: I1128 18:32:22.332959 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerDied","Data":"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b"} Nov 28 18:32:23 crc kubenswrapper[4909]: I1128 18:32:23.348845 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerStarted","Data":"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13"} Nov 28 18:32:23 crc kubenswrapper[4909]: I1128 18:32:23.371723 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-snhgh" podStartSLOduration=2.600459376 podStartE2EDuration="7.371700112s" podCreationTimestamp="2025-11-28 18:32:16 +0000 UTC" firstStartedPulling="2025-11-28 18:32:18.282240416 +0000 UTC m=+8520.678924940" lastFinishedPulling="2025-11-28 18:32:23.053481112 +0000 UTC m=+8525.450165676" observedRunningTime="2025-11-28 18:32:23.365251898 +0000 UTC m=+8525.761936442" watchObservedRunningTime="2025-11-28 18:32:23.371700112 +0000 UTC m=+8525.768384646" Nov 28 18:32:27 crc kubenswrapper[4909]: I1128 18:32:27.268005 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-snhgh" 
Nov 28 18:32:27 crc kubenswrapper[4909]: I1128 18:32:27.268432 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:28 crc kubenswrapper[4909]: I1128 18:32:28.335043 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-snhgh" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="registry-server" probeResult="failure" output=< Nov 28 18:32:28 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 18:32:28 crc kubenswrapper[4909]: > Nov 28 18:32:37 crc kubenswrapper[4909]: I1128 18:32:37.336366 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:37 crc kubenswrapper[4909]: I1128 18:32:37.416258 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:37 crc kubenswrapper[4909]: I1128 18:32:37.577549 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:38 crc kubenswrapper[4909]: I1128 18:32:38.514304 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-snhgh" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="registry-server" containerID="cri-o://91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13" gracePeriod=2 Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.071317 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.092402 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities\") pod \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.092696 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c49lt\" (UniqueName: \"kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt\") pod \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.092797 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content\") pod \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\" (UID: \"9cfd406a-b092-46cc-9b8b-d01fe49b6abc\") " Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.102454 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities" (OuterVolumeSpecName: "utilities") pod "9cfd406a-b092-46cc-9b8b-d01fe49b6abc" (UID: "9cfd406a-b092-46cc-9b8b-d01fe49b6abc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.142755 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt" (OuterVolumeSpecName: "kube-api-access-c49lt") pod "9cfd406a-b092-46cc-9b8b-d01fe49b6abc" (UID: "9cfd406a-b092-46cc-9b8b-d01fe49b6abc"). InnerVolumeSpecName "kube-api-access-c49lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.195484 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c49lt\" (UniqueName: \"kubernetes.io/projected/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-kube-api-access-c49lt\") on node \"crc\" DevicePath \"\"" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.195525 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.228470 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cfd406a-b092-46cc-9b8b-d01fe49b6abc" (UID: "9cfd406a-b092-46cc-9b8b-d01fe49b6abc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.297610 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cfd406a-b092-46cc-9b8b-d01fe49b6abc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.525025 4909 generic.go:334] "Generic (PLEG): container finished" podID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerID="91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13" exitCode=0 Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.525079 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerDied","Data":"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13"} Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.525110 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-snhgh" event={"ID":"9cfd406a-b092-46cc-9b8b-d01fe49b6abc","Type":"ContainerDied","Data":"b425f80c6c8a858ce5a3f664ca6af76750f430a92fe7887c1555429dc0b9f5b3"} Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.525132 4909 scope.go:117] "RemoveContainer" containerID="91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.525148 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-snhgh" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.564834 4909 scope.go:117] "RemoveContainer" containerID="1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.575938 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.587308 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-snhgh"] Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.595284 4909 scope.go:117] "RemoveContainer" containerID="c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.655499 4909 scope.go:117] "RemoveContainer" containerID="91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13" Nov 28 18:32:39 crc kubenswrapper[4909]: E1128 18:32:39.656020 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13\": container with ID starting with 91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13 not found: ID does not exist" containerID="91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.656073 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13"} err="failed to get container status \"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13\": rpc error: code = NotFound desc = could not find container \"91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13\": container with ID starting with 91172c651b45e1fff2298a5a66d3e64dacd163c40970e91b0db1d1f11cb65b13 not found: ID does not exist" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.656109 4909 scope.go:117] "RemoveContainer" containerID="1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b" Nov 28 18:32:39 crc kubenswrapper[4909]: E1128 18:32:39.656383 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b\": container with ID starting with 1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b not found: ID does not exist" containerID="1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.656418 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b"} err="failed to get container status \"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b\": rpc error: code = NotFound desc = could not find container \"1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b\": container with ID starting with 1806a726ccf706fc0cbb9f39680d0eb5e17a16025f316b71e571c2d251d49d5b not found: ID does not exist" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.656437 4909 scope.go:117] "RemoveContainer" containerID="c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8" Nov 28 18:32:39 crc kubenswrapper[4909]: E1128 18:32:39.656700 4909 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8\": container with ID starting with c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8 not found: ID does not exist" containerID="c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.656736 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8"} err="failed to get container status \"c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8\": rpc error: code = NotFound desc = could not find container \"c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8\": container with ID starting with c2bf61c8ae4631d617ec0e58f86e1a339654db5c9a7349c5e9c614b89429fbd8 not found: ID does not exist" Nov 28 18:32:39 crc kubenswrapper[4909]: I1128 18:32:39.915542 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" path="/var/lib/kubelet/pods/9cfd406a-b092-46cc-9b8b-d01fe49b6abc/volumes" Nov 28 18:33:49 crc kubenswrapper[4909]: I1128 18:33:49.910584 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:33:49 crc kubenswrapper[4909]: I1128 18:33:49.911129 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:33:54 crc kubenswrapper[4909]: I1128 18:33:54.372007 4909 generic.go:334] "Generic (PLEG): container finished" podID="0384aec0-25b2-49fd-b0d8-9a426a60005c" containerID="1c9b886825f59fb091b6b414ef4bfd0541c4abd633b7606e9c49f7db17fa71a5" exitCode=0 Nov 28 18:33:54 crc kubenswrapper[4909]: I1128 18:33:54.372093 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" event={"ID":"0384aec0-25b2-49fd-b0d8-9a426a60005c","Type":"ContainerDied","Data":"1c9b886825f59fb091b6b414ef4bfd0541c4abd633b7606e9c49f7db17fa71a5"} Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.839166 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.927790 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.927988 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928037 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928065 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zrb7\" (UniqueName: \"kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928127 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928164 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928192 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.928254 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1\") pod \"0384aec0-25b2-49fd-b0d8-9a426a60005c\" (UID: \"0384aec0-25b2-49fd-b0d8-9a426a60005c\") " Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.934916 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.939276 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph" (OuterVolumeSpecName: "ceph") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.942806 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7" (OuterVolumeSpecName: "kube-api-access-4zrb7") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "kube-api-access-4zrb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.967590 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.968042 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.968504 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.974968 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:55 crc kubenswrapper[4909]: I1128 18:33:55.977099 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory" (OuterVolumeSpecName: "inventory") pod "0384aec0-25b2-49fd-b0d8-9a426a60005c" (UID: "0384aec0-25b2-49fd-b0d8-9a426a60005c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031197 4909 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031232 4909 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031243 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031254 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zrb7\" (UniqueName: \"kubernetes.io/projected/0384aec0-25b2-49fd-b0d8-9a426a60005c-kube-api-access-4zrb7\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031264 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031272 4909 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031281 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.031290 4909 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0384aec0-25b2-49fd-b0d8-9a426a60005c-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.400260 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" event={"ID":"0384aec0-25b2-49fd-b0d8-9a426a60005c","Type":"ContainerDied","Data":"a391fda9268c46a9be67f46d860f1e6fe23b7d4fabd2ddf8be25892b8d9b6139"} Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.400299 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a391fda9268c46a9be67f46d860f1e6fe23b7d4fabd2ddf8be25892b8d9b6139" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.400309 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-dmmb6" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.502841 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-l48sb"] Nov 28 18:33:56 crc kubenswrapper[4909]: E1128 18:33:56.503305 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="extract-content" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503332 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="extract-content" Nov 28 18:33:56 crc kubenswrapper[4909]: E1128 18:33:56.503351 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="extract-utilities" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503360 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="extract-utilities" Nov 28 18:33:56 crc kubenswrapper[4909]: E1128 18:33:56.503393 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0384aec0-25b2-49fd-b0d8-9a426a60005c" containerName="telemetry-openstack-openstack-cell1" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503401 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="0384aec0-25b2-49fd-b0d8-9a426a60005c" containerName="telemetry-openstack-openstack-cell1" Nov 28 18:33:56 crc kubenswrapper[4909]: E1128 18:33:56.503409 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="registry-server" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503416 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="registry-server" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503625 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cfd406a-b092-46cc-9b8b-d01fe49b6abc" containerName="registry-server" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.503642 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="0384aec0-25b2-49fd-b0d8-9a426a60005c" containerName="telemetry-openstack-openstack-cell1" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.504420 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.507324 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.507985 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.508254 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.508264 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.511383 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.520177 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-l48sb"] Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.645511 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.646140 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.646238 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxb5g\" (UniqueName: \"kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.646308 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.646393 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.646472 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748496 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748605 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748703 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxb5g\" (UniqueName: \"kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748749 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748806 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.748846 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.752998 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.753921 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") pod 
\"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.754097 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.755305 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.756469 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.780848 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxb5g\" (UniqueName: \"kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g\") pod \"neutron-sriov-openstack-openstack-cell1-l48sb\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:56 crc kubenswrapper[4909]: I1128 18:33:56.822568 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:33:57 crc kubenswrapper[4909]: I1128 18:33:57.438497 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-l48sb"] Nov 28 18:33:58 crc kubenswrapper[4909]: I1128 18:33:58.420846 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" event={"ID":"5b0feed7-a809-4479-a204-1d7b86a9b953","Type":"ContainerStarted","Data":"032ebbdd1ea355e00cdbb17bc9e913e8ab14d3863a4f56a296f977d5fa69aac7"} Nov 28 18:33:58 crc kubenswrapper[4909]: I1128 18:33:58.421417 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" event={"ID":"5b0feed7-a809-4479-a204-1d7b86a9b953","Type":"ContainerStarted","Data":"d4a7c52b13f294093442e9dfa4b3da17de57a3a4fc5cfd34c5c97f952b05a6ca"} Nov 28 18:33:58 crc kubenswrapper[4909]: I1128 18:33:58.442195 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" podStartSLOduration=1.799122278 podStartE2EDuration="2.442176328s" podCreationTimestamp="2025-11-28 18:33:56 +0000 UTC" firstStartedPulling="2025-11-28 18:33:57.444520858 +0000 UTC m=+8619.841205382" lastFinishedPulling="2025-11-28 18:33:58.087574908 +0000 UTC m=+8620.484259432" observedRunningTime="2025-11-28 18:33:58.435605811 +0000 UTC m=+8620.832290335" watchObservedRunningTime="2025-11-28 18:33:58.442176328 +0000 UTC m=+8620.838860852" Nov 28 18:34:19 crc kubenswrapper[4909]: I1128 18:34:19.910812 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:34:19 crc kubenswrapper[4909]: I1128 18:34:19.911766 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:34:49 crc kubenswrapper[4909]: I1128 18:34:49.911141 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:34:49 crc kubenswrapper[4909]: I1128 18:34:49.911906 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:34:49 crc kubenswrapper[4909]: I1128 18:34:49.919751 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:34:49 crc kubenswrapper[4909]: I1128 18:34:49.920738 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d"} 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:34:49 crc kubenswrapper[4909]: I1128 18:34:49.920818 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d" gracePeriod=600 Nov 28 18:34:51 crc kubenswrapper[4909]: I1128 18:34:51.060883 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d" exitCode=0 Nov 28 18:34:51 crc kubenswrapper[4909]: I1128 18:34:51.060964 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d"} Nov 28 18:34:51 crc kubenswrapper[4909]: I1128 18:34:51.061621 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"} Nov 28 18:34:51 crc kubenswrapper[4909]: I1128 18:34:51.061686 4909 scope.go:117] "RemoveContainer" containerID="5226a771ffcb00b5135f06b8e238e32f1bb502ad55f4c3b1d915e6908d8a3ef7" Nov 28 18:37:13 crc kubenswrapper[4909]: I1128 18:37:13.693372 4909 generic.go:334] "Generic (PLEG): container finished" podID="5b0feed7-a809-4479-a204-1d7b86a9b953" containerID="032ebbdd1ea355e00cdbb17bc9e913e8ab14d3863a4f56a296f977d5fa69aac7" exitCode=0 Nov 28 18:37:13 crc kubenswrapper[4909]: I1128 18:37:13.693460 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" event={"ID":"5b0feed7-a809-4479-a204-1d7b86a9b953","Type":"ContainerDied","Data":"032ebbdd1ea355e00cdbb17bc9e913e8ab14d3863a4f56a296f977d5fa69aac7"} Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.217889 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.375689 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.375783 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.375820 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.375907 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.375945 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.376051 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxb5g\" (UniqueName: \"kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.382889 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g" (OuterVolumeSpecName: "kube-api-access-qxb5g") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "kube-api-access-qxb5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.382936 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph" (OuterVolumeSpecName: "ceph") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.384374 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.405471 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:37:15 crc kubenswrapper[4909]: E1128 18:37:15.407147 4909 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0 podName:5b0feed7-a809-4479-a204-1d7b86a9b953 nodeName:}" failed. No retries permitted until 2025-11-28 18:37:15.90711383 +0000 UTC m=+8818.303798354 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "neutron-sriov-agent-neutron-config-0" (UniqueName: "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953") : error deleting /var/lib/kubelet/pods/5b0feed7-a809-4479-a204-1d7b86a9b953/volume-subpaths: remove /var/lib/kubelet/pods/5b0feed7-a809-4479-a204-1d7b86a9b953/volume-subpaths: no such file or directory Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.410827 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory" (OuterVolumeSpecName: "inventory") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.478560 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.478606 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.478622 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.478633 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.478646 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxb5g\" (UniqueName: \"kubernetes.io/projected/5b0feed7-a809-4479-a204-1d7b86a9b953-kube-api-access-qxb5g\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.725114 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" event={"ID":"5b0feed7-a809-4479-a204-1d7b86a9b953","Type":"ContainerDied","Data":"d4a7c52b13f294093442e9dfa4b3da17de57a3a4fc5cfd34c5c97f952b05a6ca"} Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.725188 4909 pod_container_deletor.go:80] "Container not 
found in pod's containers" containerID="d4a7c52b13f294093442e9dfa4b3da17de57a3a4fc5cfd34c5c97f952b05a6ca" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.725209 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-l48sb" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.825639 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-r2h42"] Nov 28 18:37:15 crc kubenswrapper[4909]: E1128 18:37:15.826209 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b0feed7-a809-4479-a204-1d7b86a9b953" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.826233 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b0feed7-a809-4479-a204-1d7b86a9b953" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.826524 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b0feed7-a809-4479-a204-1d7b86a9b953" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.827572 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.830541 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.839473 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-r2h42"] Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.990824 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") pod \"5b0feed7-a809-4479-a204-1d7b86a9b953\" (UID: \"5b0feed7-a809-4479-a204-1d7b86a9b953\") " Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.991334 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.991565 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.991611 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.991831 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.991989 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.992039 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfxnl\" (UniqueName: \"kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:15 crc kubenswrapper[4909]: I1128 18:37:15.994150 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "5b0feed7-a809-4479-a204-1d7b86a9b953" (UID: "5b0feed7-a809-4479-a204-1d7b86a9b953"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094496 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094623 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094677 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfxnl\" (UniqueName: \"kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094759 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094852 
4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094877 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.094992 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5b0feed7-a809-4479-a204-1d7b86a9b953-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.099278 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.099322 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.099613 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.100279 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.100520 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.113490 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfxnl\" (UniqueName: \"kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl\") pod \"neutron-dhcp-openstack-openstack-cell1-r2h42\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") " 
pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.145381 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.681271 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-r2h42"] Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.687368 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:37:16 crc kubenswrapper[4909]: I1128 18:37:16.735248 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" event={"ID":"fa77c1e1-fac0-4183-824f-fbd83a237232","Type":"ContainerStarted","Data":"3b70d01c22b4ae5683c1741e6f0fdc2274ed5b036497976f1c52ba16e4afe922"} Nov 28 18:37:18 crc kubenswrapper[4909]: I1128 18:37:18.766094 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" event={"ID":"fa77c1e1-fac0-4183-824f-fbd83a237232","Type":"ContainerStarted","Data":"0b4d3c49e0e36ece54a6dc9609e1938e19667cae76321aa7fe655079ab8d7c61"} Nov 28 18:37:18 crc kubenswrapper[4909]: I1128 18:37:18.800286 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" podStartSLOduration=2.797485331 podStartE2EDuration="3.800259438s" podCreationTimestamp="2025-11-28 18:37:15 +0000 UTC" firstStartedPulling="2025-11-28 18:37:16.687083504 +0000 UTC m=+8819.083768038" lastFinishedPulling="2025-11-28 18:37:17.689857611 +0000 UTC m=+8820.086542145" observedRunningTime="2025-11-28 18:37:18.790501376 +0000 UTC m=+8821.187185960" watchObservedRunningTime="2025-11-28 18:37:18.800259438 +0000 UTC m=+8821.196943992" Nov 28 18:37:19 crc kubenswrapper[4909]: I1128 18:37:19.911161 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:37:19 crc kubenswrapper[4909]: I1128 18:37:19.911547 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:37:49 crc kubenswrapper[4909]: I1128 18:37:49.911034 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:37:49 crc kubenswrapper[4909]: I1128 18:37:49.912521 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:38:19 crc kubenswrapper[4909]: I1128 18:38:19.910997 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:38:19 crc kubenswrapper[4909]: I1128 18:38:19.911590 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:38:19 crc kubenswrapper[4909]: I1128 18:38:19.916640 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:38:19 crc kubenswrapper[4909]: I1128 18:38:19.917440 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:38:19 crc kubenswrapper[4909]: I1128 18:38:19.917502 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" gracePeriod=600 Nov 28 18:38:20 crc kubenswrapper[4909]: E1128 18:38:20.049989 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:38:20 crc kubenswrapper[4909]: I1128 18:38:20.500577 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" exitCode=0 Nov 28 18:38:20 crc kubenswrapper[4909]: I1128 18:38:20.500605 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"} Nov 28 18:38:20 crc kubenswrapper[4909]: I1128 18:38:20.500752 4909 scope.go:117] "RemoveContainer" containerID="816a605e5fcc3b37239c060389d1f7b2373ec3869404f8abfe6340fee7ab8f9d" Nov 28 18:38:20 crc kubenswrapper[4909]: I1128 18:38:20.501328 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:38:20 crc kubenswrapper[4909]: E1128 18:38:20.501688 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" 
podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:38:34 crc kubenswrapper[4909]: I1128 18:38:34.902200 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:38:34 crc kubenswrapper[4909]: E1128 18:38:34.903209 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:38:46 crc kubenswrapper[4909]: I1128 18:38:46.902287 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:38:46 crc kubenswrapper[4909]: E1128 18:38:46.903505 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.787817 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5zjlm"] Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.790777 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.818786 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5zjlm"] Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.886451 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-catalog-content\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.887744 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6snd\" (UniqueName: \"kubernetes.io/projected/774f9f72-67d9-487a-89d9-08fed5aeea79-kube-api-access-l6snd\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.887842 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-utilities\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.989771 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6snd\" (UniqueName: \"kubernetes.io/projected/774f9f72-67d9-487a-89d9-08fed5aeea79-kube-api-access-l6snd\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " 
pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.989833 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-utilities\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.989909 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-catalog-content\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.990531 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-catalog-content\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:51 crc kubenswrapper[4909]: I1128 18:38:51.990797 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/774f9f72-67d9-487a-89d9-08fed5aeea79-utilities\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:52 crc kubenswrapper[4909]: I1128 18:38:52.017596 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6snd\" (UniqueName: \"kubernetes.io/projected/774f9f72-67d9-487a-89d9-08fed5aeea79-kube-api-access-l6snd\") pod \"community-operators-5zjlm\" (UID: \"774f9f72-67d9-487a-89d9-08fed5aeea79\") " pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:52 crc kubenswrapper[4909]: I1128 18:38:52.126128 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5zjlm" Nov 28 18:38:52 crc kubenswrapper[4909]: I1128 18:38:52.691163 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5zjlm"] Nov 28 18:38:52 crc kubenswrapper[4909]: I1128 18:38:52.862753 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5zjlm" event={"ID":"774f9f72-67d9-487a-89d9-08fed5aeea79","Type":"ContainerStarted","Data":"9b9f613b61bfa69ce6d57d82954a16cd726c5a53be368d7e64375e4ac4b35905"} Nov 28 18:38:53 crc kubenswrapper[4909]: I1128 18:38:53.877909 4909 generic.go:334] "Generic (PLEG): container finished" podID="774f9f72-67d9-487a-89d9-08fed5aeea79" containerID="23b995db2bfa1f8dd085934bf433b74b45d3e068c61128db56340e3c705cc3f5" exitCode=0 Nov 28 18:38:53 crc kubenswrapper[4909]: I1128 18:38:53.878204 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5zjlm" event={"ID":"774f9f72-67d9-487a-89d9-08fed5aeea79","Type":"ContainerDied","Data":"23b995db2bfa1f8dd085934bf433b74b45d3e068c61128db56340e3c705cc3f5"} Nov 28 18:38:58 crc kubenswrapper[4909]: I1128 18:38:58.901605 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:38:58 crc kubenswrapper[4909]: E1128 18:38:58.903142 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:39:00 crc kubenswrapper[4909]: I1128 18:39:00.000880 4909 generic.go:334] "Generic (PLEG): container finished" podID="774f9f72-67d9-487a-89d9-08fed5aeea79" containerID="f7706198e4db0d6880e4e91721505a75491126e5c1b38f51891a8f69d105f2d7" exitCode=0 Nov 28 18:39:00 crc kubenswrapper[4909]: I1128 18:39:00.000938 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5zjlm" event={"ID":"774f9f72-67d9-487a-89d9-08fed5aeea79","Type":"ContainerDied","Data":"f7706198e4db0d6880e4e91721505a75491126e5c1b38f51891a8f69d105f2d7"} Nov 28 18:39:01 crc kubenswrapper[4909]: I1128 18:39:01.013004 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5zjlm" event={"ID":"774f9f72-67d9-487a-89d9-08fed5aeea79","Type":"ContainerStarted","Data":"172695de8aece41cddbb07fa82189e7254dee5e6e87049e2cf57b585f1b03195"} Nov 28 18:39:01 crc kubenswrapper[4909]: I1128 18:39:01.038607 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5zjlm" podStartSLOduration=3.508933548 podStartE2EDuration="10.038587668s" podCreationTimestamp="2025-11-28 18:38:51 +0000 UTC" firstStartedPulling="2025-11-28 18:38:53.88110212 +0000 UTC m=+8916.277786644" lastFinishedPulling="2025-11-28 18:39:00.41075623 +0000 UTC m=+8922.807440764" observedRunningTime="2025-11-28 18:39:01.032526465 +0000 UTC m=+8923.429211009" watchObservedRunningTime="2025-11-28 18:39:01.038587668 +0000 UTC m=+8923.435272182" Nov 28 18:39:02 crc kubenswrapper[4909]: I1128 18:39:02.127349 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5zjlm" 
Nov 28 18:39:02 crc kubenswrapper[4909]: I1128 18:39:02.127955 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5zjlm"
Nov 28 18:39:03 crc kubenswrapper[4909]: I1128 18:39:03.180114 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-5zjlm" podUID="774f9f72-67d9-487a-89d9-08fed5aeea79" containerName="registry-server" probeResult="failure" output=<
Nov 28 18:39:03 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s
Nov 28 18:39:03 crc kubenswrapper[4909]: >
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.845592 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.850777 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.862538 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.988378 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.988525 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:07 crc kubenswrapper[4909]: I1128 18:39:07.988595 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztvpf\" (UniqueName: \"kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.102003 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.102385 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.102821 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztvpf\" (UniqueName: \"kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.103311 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.103607 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.143715 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztvpf\" (UniqueName: \"kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf\") pod \"certified-operators-9jx55\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") " pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.216376 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:08 crc kubenswrapper[4909]: I1128 18:39:08.839398 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:08 crc kubenswrapper[4909]: W1128 18:39:08.842405 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3bf5084_0c96_4c4e_b3ac_dccb6cf166b5.slice/crio-a37d99d5d353341b62ba46bd76409bf6768b1e4679e18f22651dbee989f9ab5e WatchSource:0}: Error finding container a37d99d5d353341b62ba46bd76409bf6768b1e4679e18f22651dbee989f9ab5e: Status 404 returned error can't find the container with id a37d99d5d353341b62ba46bd76409bf6768b1e4679e18f22651dbee989f9ab5e
Nov 28 18:39:09 crc kubenswrapper[4909]: I1128 18:39:09.110975 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerID="1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0" exitCode=0
Nov 28 18:39:09 crc kubenswrapper[4909]: I1128 18:39:09.111025 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerDied","Data":"1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0"}
Nov 28 18:39:09 crc kubenswrapper[4909]: I1128 18:39:09.111068 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerStarted","Data":"a37d99d5d353341b62ba46bd76409bf6768b1e4679e18f22651dbee989f9ab5e"}
Nov 28 18:39:10 crc kubenswrapper[4909]: I1128 18:39:10.901896 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:39:10 crc kubenswrapper[4909]: E1128 18:39:10.902500 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:39:12 crc kubenswrapper[4909]: I1128 18:39:12.188642 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5zjlm"
Nov 28 18:39:12 crc kubenswrapper[4909]: I1128 18:39:12.258953 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5zjlm"
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.023276 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5zjlm"]
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.204793 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.205139 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w6v2w" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="registry-server" containerID="cri-o://e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb" gracePeriod=2
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.731706 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.853292 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2l5rm\" (UniqueName: \"kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm\") pod \"1117637d-0189-45f6-8998-083e64532df1\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") "
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.853356 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities\") pod \"1117637d-0189-45f6-8998-083e64532df1\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") "
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.853517 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content\") pod \"1117637d-0189-45f6-8998-083e64532df1\" (UID: \"1117637d-0189-45f6-8998-083e64532df1\") "
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.856319 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities" (OuterVolumeSpecName: "utilities") pod "1117637d-0189-45f6-8998-083e64532df1" (UID: "1117637d-0189-45f6-8998-083e64532df1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.866363 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm" (OuterVolumeSpecName: "kube-api-access-2l5rm") pod "1117637d-0189-45f6-8998-083e64532df1" (UID: "1117637d-0189-45f6-8998-083e64532df1"). InnerVolumeSpecName "kube-api-access-2l5rm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.956639 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2l5rm\" (UniqueName: \"kubernetes.io/projected/1117637d-0189-45f6-8998-083e64532df1-kube-api-access-2l5rm\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.956767 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:13 crc kubenswrapper[4909]: I1128 18:39:13.978805 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1117637d-0189-45f6-8998-083e64532df1" (UID: "1117637d-0189-45f6-8998-083e64532df1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.059255 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1117637d-0189-45f6-8998-083e64532df1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.174355 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerStarted","Data":"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"}
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.177464 4909 generic.go:334] "Generic (PLEG): container finished" podID="1117637d-0189-45f6-8998-083e64532df1" containerID="e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb" exitCode=0
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.177499 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6v2w"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.177530 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerDied","Data":"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"}
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.177580 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6v2w" event={"ID":"1117637d-0189-45f6-8998-083e64532df1","Type":"ContainerDied","Data":"2cb3c0387a7ed18b7f7215d1e5457325772ae0bcde11b8b6c55622f3fea20a43"}
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.177601 4909 scope.go:117] "RemoveContainer" containerID="e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.223796 4909 scope.go:117] "RemoveContainer" containerID="668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.242954 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.261752 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w6v2w"]
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.298840 4909 scope.go:117] "RemoveContainer" containerID="dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.331899 4909 scope.go:117] "RemoveContainer" containerID="e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"
Nov 28 18:39:14 crc kubenswrapper[4909]: E1128 18:39:14.332365 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb\": container with ID starting with e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb not found: ID does not exist" containerID="e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.332398 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb"} err="failed to get container status \"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb\": rpc error: code = NotFound desc = could not find container \"e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb\": container with ID starting with e525159c45ad5a30cf12bdabdd0f2eea59585d6ea79ea5cfd78a779af41383bb not found: ID does not exist"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.332418 4909 scope.go:117] "RemoveContainer" containerID="668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1"
Nov 28 18:39:14 crc kubenswrapper[4909]: E1128 18:39:14.332686 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1\": container with ID starting with 668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1 not found: ID does not exist" containerID="668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.332720 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1"} err="failed to get container status \"668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1\": rpc error: code = NotFound desc = could not find container \"668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1\": container with ID starting with 668ca41ed9fe54492a6799c23aba133a8edaa1fbe02f3615620231d00c45f9e1 not found: ID does not exist"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.332739 4909 scope.go:117] "RemoveContainer" containerID="dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b"
Nov 28 18:39:14 crc kubenswrapper[4909]: E1128 18:39:14.336919 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b\": container with ID starting with dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b not found: ID does not exist" containerID="dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b"
Nov 28 18:39:14 crc kubenswrapper[4909]: I1128 18:39:14.336954 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b"} err="failed to get container status \"dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b\": rpc error: code = NotFound desc = could not find container \"dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b\": container with ID starting with dff2d680a1e8ac13835158596810c1c5b3cc2b4e4d6c77f0b277f43c726f596b not found: ID does not exist"
Nov 28 18:39:15 crc kubenswrapper[4909]: I1128 18:39:15.925786 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1117637d-0189-45f6-8998-083e64532df1" path="/var/lib/kubelet/pods/1117637d-0189-45f6-8998-083e64532df1/volumes"
Nov 28 18:39:16 crc kubenswrapper[4909]: I1128 18:39:16.210061 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerID="a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217" exitCode=0
Nov 28 18:39:16 crc kubenswrapper[4909]: I1128 18:39:16.210258 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerDied","Data":"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"}
Nov 28 18:39:17 crc kubenswrapper[4909]: I1128 18:39:17.223308 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerStarted","Data":"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"}
Nov 28 18:39:17 crc kubenswrapper[4909]: I1128 18:39:17.254819 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9jx55" podStartSLOduration=2.611205519 podStartE2EDuration="10.254796492s" podCreationTimestamp="2025-11-28 18:39:07 +0000 UTC" firstStartedPulling="2025-11-28 18:39:09.115851695 +0000 UTC m=+8931.512536219" lastFinishedPulling="2025-11-28 18:39:16.759442668 +0000 UTC m=+8939.156127192" observedRunningTime="2025-11-28 18:39:17.241875714 +0000 UTC m=+8939.638560248" watchObservedRunningTime="2025-11-28 18:39:17.254796492 +0000 UTC m=+8939.651481036"
Nov 28 18:39:18 crc kubenswrapper[4909]: I1128 18:39:18.217568 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:18 crc kubenswrapper[4909]: I1128 18:39:18.218024 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:19 crc kubenswrapper[4909]: I1128 18:39:19.283602 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-9jx55" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="registry-server" probeResult="failure" output=<
Nov 28 18:39:19 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s
Nov 28 18:39:19 crc kubenswrapper[4909]: >
Nov 28 18:39:24 crc kubenswrapper[4909]: I1128 18:39:24.902331 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:39:24 crc kubenswrapper[4909]: E1128 18:39:24.904081 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:39:28 crc kubenswrapper[4909]: I1128 18:39:28.301883 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:28 crc kubenswrapper[4909]: I1128 18:39:28.366433 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:28 crc kubenswrapper[4909]: I1128 18:39:28.552353 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.369739 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9jx55" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="registry-server" containerID="cri-o://ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71" gracePeriod=2
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.888513 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.929179 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content\") pod \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") "
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.929411 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztvpf\" (UniqueName: \"kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf\") pod \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") "
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.929493 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities\") pod \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\" (UID: \"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5\") "
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.932531 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities" (OuterVolumeSpecName: "utilities") pod "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" (UID: "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:39:29 crc kubenswrapper[4909]: I1128 18:39:29.964537 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf" (OuterVolumeSpecName: "kube-api-access-ztvpf") pod "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" (UID: "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5"). InnerVolumeSpecName "kube-api-access-ztvpf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.000712 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" (UID: "a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.033273 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.033316 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztvpf\" (UniqueName: \"kubernetes.io/projected/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-kube-api-access-ztvpf\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.033330 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.381845 4909 generic.go:334] "Generic (PLEG): container finished" podID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerID="ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71" exitCode=0
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.381915 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerDied","Data":"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"}
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.381952 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jx55" event={"ID":"a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5","Type":"ContainerDied","Data":"a37d99d5d353341b62ba46bd76409bf6768b1e4679e18f22651dbee989f9ab5e"}
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.381977 4909 scope.go:117] "RemoveContainer" containerID="ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.382192 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jx55"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.414650 4909 scope.go:117] "RemoveContainer" containerID="a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.428690 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.453759 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9jx55"]
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.460762 4909 scope.go:117] "RemoveContainer" containerID="1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.492412 4909 scope.go:117] "RemoveContainer" containerID="ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"
Nov 28 18:39:30 crc kubenswrapper[4909]: E1128 18:39:30.493529 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71\": container with ID starting with ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71 not found: ID does not exist" containerID="ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.493618 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71"} err="failed to get container status \"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71\": rpc error: code = NotFound desc = could not find container \"ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71\": container with ID starting with ef209c2025b01a57cc9d14625a86723d592ee90a182bb4602a990ff536898f71 not found: ID does not exist"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.493734 4909 scope.go:117] "RemoveContainer" containerID="a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"
Nov 28 18:39:30 crc kubenswrapper[4909]: E1128 18:39:30.494173 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217\": container with ID starting with a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217 not found: ID does not exist" containerID="a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.494256 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217"} err="failed to get container status \"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217\": rpc error: code = NotFound desc = could not find container \"a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217\": container with ID starting with a4eb6514ed21dbbb586014b7802036b122e0f958087c874898110b7020a5a217 not found: ID does not exist"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.494346 4909 scope.go:117] "RemoveContainer" containerID="1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0"
Nov 28 18:39:30 crc kubenswrapper[4909]: E1128 18:39:30.494642 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0\": container with ID starting with 1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0 not found: ID does not exist" containerID="1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0"
Nov 28 18:39:30 crc kubenswrapper[4909]: I1128 18:39:30.494765 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0"} err="failed to get container status \"1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0\": rpc error: code = NotFound desc = could not find container \"1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0\": container with ID starting with 1c40a8552a778af20c99592c0294d6a0e7aa9e33fcdd9f328e607c99c28720a0 not found: ID does not exist"
Nov 28 18:39:31 crc kubenswrapper[4909]: I1128 18:39:31.917363 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" path="/var/lib/kubelet/pods/a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5/volumes"
Nov 28 18:39:37 crc kubenswrapper[4909]: I1128 18:39:37.910808 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:39:37 crc kubenswrapper[4909]: E1128 18:39:37.912049 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:39:48 crc kubenswrapper[4909]: I1128 18:39:48.904159 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:39:48 crc kubenswrapper[4909]: E1128 18:39:48.905145 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:39:59 crc kubenswrapper[4909]: I1128 18:39:59.905717 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:39:59 crc kubenswrapper[4909]: E1128 18:39:59.906639 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:40:13 crc kubenswrapper[4909]: I1128 18:40:13.902723 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:40:13 crc kubenswrapper[4909]: E1128 18:40:13.903971 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:40:24 crc kubenswrapper[4909]: I1128 18:40:24.902056 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:40:24 crc kubenswrapper[4909]: E1128 18:40:24.903098 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:40:35 crc kubenswrapper[4909]: I1128 18:40:35.906448 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:40:35 crc kubenswrapper[4909]: E1128 18:40:35.908839 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:40:46 crc kubenswrapper[4909]: I1128 18:40:46.903443 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:40:46 crc kubenswrapper[4909]: E1128 18:40:46.904290 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:41:00 crc kubenswrapper[4909]: I1128 18:41:00.901937 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:41:00 crc kubenswrapper[4909]: E1128 18:41:00.902876 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:41:12 crc kubenswrapper[4909]: I1128 18:41:12.901500 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:41:12 crc kubenswrapper[4909]: E1128 18:41:12.902392 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:41:26 crc kubenswrapper[4909]: I1128 18:41:26.901556 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:41:26 crc kubenswrapper[4909]: E1128 18:41:26.902361 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.016459 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"] Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018268 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="extract-utilities" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018292 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="extract-utilities" Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018331 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018339 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018358 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018366 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018381 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="extract-utilities" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018389 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="extract-utilities" Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018420 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="extract-content" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018430 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="extract-content" Nov 28 18:41:33 crc kubenswrapper[4909]: E1128 18:41:33.018455 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="extract-content" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018463 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="extract-content" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018745 4909 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="1117637d-0189-45f6-8998-083e64532df1" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.018781 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3bf5084-0c96-4c4e-b3ac-dccb6cf166b5" containerName="registry-server" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.020903 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.032244 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"] Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.126551 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.126648 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.126757 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q6md\" (UniqueName: \"kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.230695 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.229489 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.230912 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.231316 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.231517 4909 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-7q6md\" (UniqueName: \"kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.264593 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q6md\" (UniqueName: \"kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md\") pod \"redhat-marketplace-hrmx6\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") " pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.354230 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrmx6" Nov 28 18:41:33 crc kubenswrapper[4909]: I1128 18:41:33.874965 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"] Nov 28 18:41:34 crc kubenswrapper[4909]: I1128 18:41:34.710325 4909 generic.go:334] "Generic (PLEG): container finished" podID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerID="f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44" exitCode=0 Nov 28 18:41:34 crc kubenswrapper[4909]: I1128 18:41:34.710432 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerDied","Data":"f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44"} Nov 28 18:41:34 crc kubenswrapper[4909]: I1128 18:41:34.710613 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerStarted","Data":"e427fbf6da8ab346307e5bba3f62935acb87f87ff60e1aa87be314097527d3ac"} Nov 28 18:41:36 crc kubenswrapper[4909]: I1128 18:41:36.736836 4909 generic.go:334] "Generic (PLEG): container finished" podID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerID="7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d" exitCode=0 Nov 28 18:41:36 crc kubenswrapper[4909]: I1128 18:41:36.736904 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerDied","Data":"7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d"} Nov 28 18:41:37 crc kubenswrapper[4909]: I1128 18:41:37.754366 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerStarted","Data":"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"} Nov 28 18:41:37 crc kubenswrapper[4909]: I1128 18:41:37.788736 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hrmx6" podStartSLOduration=3.298231343 podStartE2EDuration="5.78871501s" podCreationTimestamp="2025-11-28 18:41:32 +0000 UTC" firstStartedPulling="2025-11-28 18:41:34.712979259 +0000 UTC m=+9077.109663783" lastFinishedPulling="2025-11-28 18:41:37.203462936 +0000 UTC m=+9079.600147450" observedRunningTime="2025-11-28 18:41:37.776609324 +0000 UTC m=+9080.173293858" watchObservedRunningTime="2025-11-28 18:41:37.78871501 +0000 UTC m=+9080.185399534" Nov 28 18:41:41 crc kubenswrapper[4909]: I1128 
Nov 28 18:41:41 crc kubenswrapper[4909]: E1128 18:41:41.902769 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:41:43 crc kubenswrapper[4909]: I1128 18:41:43.355474 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:43 crc kubenswrapper[4909]: I1128 18:41:43.357072 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:43 crc kubenswrapper[4909]: I1128 18:41:43.447176 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:43 crc kubenswrapper[4909]: I1128 18:41:43.900305 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:43 crc kubenswrapper[4909]: I1128 18:41:43.948792 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"]
Nov 28 18:41:45 crc kubenswrapper[4909]: I1128 18:41:45.848144 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hrmx6" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="registry-server" containerID="cri-o://4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a" gracePeriod=2
Nov 28 18:41:46 crc kubenswrapper[4909]: E1128 18:41:46.115679 4909 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaba4d954_2436_42f5_a7e8_5a4b44af15ab.slice/crio-conmon-4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.840629 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.864210 4909 generic.go:334] "Generic (PLEG): container finished" podID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerID="4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a" exitCode=0
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.864248 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerDied","Data":"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"}
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.864270 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrmx6"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.864342 4909 scope.go:117] "RemoveContainer" containerID="4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.864318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrmx6" event={"ID":"aba4d954-2436-42f5-a7e8-5a4b44af15ab","Type":"ContainerDied","Data":"e427fbf6da8ab346307e5bba3f62935acb87f87ff60e1aa87be314097527d3ac"}
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.903199 4909 scope.go:117] "RemoveContainer" containerID="7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.933029 4909 scope.go:117] "RemoveContainer" containerID="f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44"
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.937594 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content\") pod \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") "
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.937648 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities\") pod \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") "
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.937732 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q6md\" (UniqueName: \"kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md\") pod \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\" (UID: \"aba4d954-2436-42f5-a7e8-5a4b44af15ab\") "
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.939107 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities" (OuterVolumeSpecName: "utilities") pod "aba4d954-2436-42f5-a7e8-5a4b44af15ab" (UID: "aba4d954-2436-42f5-a7e8-5a4b44af15ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.945650 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md" (OuterVolumeSpecName: "kube-api-access-7q6md") pod "aba4d954-2436-42f5-a7e8-5a4b44af15ab" (UID: "aba4d954-2436-42f5-a7e8-5a4b44af15ab"). InnerVolumeSpecName "kube-api-access-7q6md". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:41:46 crc kubenswrapper[4909]: I1128 18:41:46.964782 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aba4d954-2436-42f5-a7e8-5a4b44af15ab" (UID: "aba4d954-2436-42f5-a7e8-5a4b44af15ab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.025100 4909 scope.go:117] "RemoveContainer" containerID="4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"
Nov 28 18:41:47 crc kubenswrapper[4909]: E1128 18:41:47.025494 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a\": container with ID starting with 4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a not found: ID does not exist" containerID="4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.025527 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a"} err="failed to get container status \"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a\": rpc error: code = NotFound desc = could not find container \"4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a\": container with ID starting with 4fa444563a7f2d6e35316e61702a8ba760f50c5de50cf61ed04cda6fc670409a not found: ID does not exist"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.025545 4909 scope.go:117] "RemoveContainer" containerID="7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d"
Nov 28 18:41:47 crc kubenswrapper[4909]: E1128 18:41:47.025838 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d\": container with ID starting with 7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d not found: ID does not exist" containerID="7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.025863 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d"} err="failed to get container status \"7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d\": rpc error: code = NotFound desc = could not find container \"7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d\": container with ID starting with 7f36d8f490ec096763951b420ee877f45de621e99f4d81c93d69ea0506f82a2d not found: ID does not exist"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.025876 4909 scope.go:117] "RemoveContainer" containerID="f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44"
Nov 28 18:41:47 crc kubenswrapper[4909]: E1128 18:41:47.026077 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44\": container with ID starting with f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44 not found: ID does not exist" containerID="f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.026101 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44"} err="failed to get container status \"f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44\": rpc error: code = NotFound desc = could not find container \"f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44\": container with ID starting with f7a9b883fb10bc21401bf48d1745641f93a9ede61dcbaf4a6acc541f4dc09f44 not found: ID does not exist"
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.040755 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.040781 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba4d954-2436-42f5-a7e8-5a4b44af15ab-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.040791 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q6md\" (UniqueName: \"kubernetes.io/projected/aba4d954-2436-42f5-a7e8-5a4b44af15ab-kube-api-access-7q6md\") on node \"crc\" DevicePath \"\""
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.204435 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"]
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.213390 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrmx6"]
Nov 28 18:41:47 crc kubenswrapper[4909]: I1128 18:41:47.914797 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" path="/var/lib/kubelet/pods/aba4d954-2436-42f5-a7e8-5a4b44af15ab/volumes"
Nov 28 18:41:55 crc kubenswrapper[4909]: I1128 18:41:55.903422 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:41:55 crc kubenswrapper[4909]: E1128 18:41:55.904420 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:42:09 crc kubenswrapper[4909]: I1128 18:42:09.901798 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:42:09 crc kubenswrapper[4909]: E1128 18:42:09.902458 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:42:21 crc kubenswrapper[4909]: I1128 18:42:21.901717 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2"
Nov 28 18:42:21 crc kubenswrapper[4909]: E1128 18:42:21.902383 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 18:42:30 crc kubenswrapper[4909]: I1128 18:42:30.368117 4909 generic.go:334] "Generic (PLEG): container finished" podID="fa77c1e1-fac0-4183-824f-fbd83a237232" containerID="0b4d3c49e0e36ece54a6dc9609e1938e19667cae76321aa7fe655079ab8d7c61" exitCode=0
Nov 28 18:42:30 crc kubenswrapper[4909]: I1128 18:42:30.368272 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" event={"ID":"fa77c1e1-fac0-4183-824f-fbd83a237232","Type":"ContainerDied","Data":"0b4d3c49e0e36ece54a6dc9609e1938e19667cae76321aa7fe655079ab8d7c61"}
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.851632 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42"
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.961894 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.961974 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.962013 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.962049 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.962124 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.962302 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfxnl\" (UniqueName: \"kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl\") pod \"fa77c1e1-fac0-4183-824f-fbd83a237232\" (UID: \"fa77c1e1-fac0-4183-824f-fbd83a237232\") "
Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.975931 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.977644 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph" (OuterVolumeSpecName: "ceph") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.977873 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl" (OuterVolumeSpecName: "kube-api-access-gfxnl") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "kube-api-access-gfxnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.989588 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:31 crc kubenswrapper[4909]: I1128 18:42:31.999037 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory" (OuterVolumeSpecName: "inventory") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.007259 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fa77c1e1-fac0-4183-824f-fbd83a237232" (UID: "fa77c1e1-fac0-4183-824f-fbd83a237232"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064735 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfxnl\" (UniqueName: \"kubernetes.io/projected/fa77c1e1-fac0-4183-824f-fbd83a237232-kube-api-access-gfxnl\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064804 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064814 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064825 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064836 4909 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.064845 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa77c1e1-fac0-4183-824f-fbd83a237232-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.394120 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" event={"ID":"fa77c1e1-fac0-4183-824f-fbd83a237232","Type":"ContainerDied","Data":"3b70d01c22b4ae5683c1741e6f0fdc2274ed5b036497976f1c52ba16e4afe922"} Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.394178 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b70d01c22b4ae5683c1741e6f0fdc2274ed5b036497976f1c52ba16e4afe922" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.394195 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-r2h42" Nov 28 18:42:32 crc kubenswrapper[4909]: I1128 18:42:32.904175 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:42:32 crc kubenswrapper[4909]: E1128 18:42:32.905052 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:42:43 crc kubenswrapper[4909]: I1128 18:42:43.903600 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:42:43 crc kubenswrapper[4909]: E1128 18:42:43.904357 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:42:56 crc kubenswrapper[4909]: I1128 18:42:56.904214 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:42:56 crc kubenswrapper[4909]: E1128 18:42:56.907520 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:42:57 crc kubenswrapper[4909]: I1128 18:42:57.567952 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:42:57 crc kubenswrapper[4909]: I1128 18:42:57.568371 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerName="nova-cell0-conductor-conductor" containerID="cri-o://4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.055954 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.056159 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="520d15b5-e2af-446f-8576-3d67355807b9" containerName="nova-cell1-conductor-conductor" containerID="cri-o://6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.209523 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.209779 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" 
containerName="nova-scheduler-scheduler" containerID="cri-o://7502ba00a4b47f66f200769bd3bc15236c8b4f7d4b3e60240180cae56fb01d26" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.225140 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.225537 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-log" containerID="cri-o://3916421bf5eb1a4dd64705dc52aba217e0dd0f8adcc8ed2960649da6026f1340" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.226224 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-api" containerID="cri-o://3011ae936e046f930a87e8382a2115d20b5fac41b3278ba9b19928abacbf0d1f" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.319048 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.319631 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" containerID="cri-o://cf563c6f57ef2c67b0f416db42c68c8a696cf8b010ccf61af5cd491c594466f4" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.319845 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" containerID="cri-o://9116defd9fe70722ad03cad59ee3e52631b206c84226f2c2088a926031e0b38c" gracePeriod=30 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.446717 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d"] Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.447237 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="extract-content" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447257 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="extract-content" Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.447300 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="extract-utilities" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447308 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="extract-utilities" Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.447332 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa77c1e1-fac0-4183-824f-fbd83a237232" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447341 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa77c1e1-fac0-4183-824f-fbd83a237232" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.447353 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="registry-server" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447359 4909 
state_mem.go:107] "Deleted CPUSet assignment" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="registry-server" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447585 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="aba4d954-2436-42f5-a7e8-5a4b44af15ab" containerName="registry-server" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.447603 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa77c1e1-fac0-4183-824f-fbd83a237232" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.448381 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.451960 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.452374 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.452613 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-z249h" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.452924 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.453799 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.456868 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.456963 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.458579 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d"] Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.550957 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.550997 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551041 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551224 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551285 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551307 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551355 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551427 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551455 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.551599 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 
18:42:58.551693 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ch9g\" (UniqueName: \"kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653802 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653848 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653871 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653899 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653937 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.653963 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.654010 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key\") pod 
\"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.654041 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ch9g\" (UniqueName: \"kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.654112 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.654131 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.654169 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.655011 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.656162 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.659535 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.659653 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.659810 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.659866 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.660265 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.660804 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.661119 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.663413 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.688215 4909 generic.go:334] "Generic (PLEG): container finished" podID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerID="cf563c6f57ef2c67b0f416db42c68c8a696cf8b010ccf61af5cd491c594466f4" exitCode=143 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.688296 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerDied","Data":"cf563c6f57ef2c67b0f416db42c68c8a696cf8b010ccf61af5cd491c594466f4"} Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.689193 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ch9g\" (UniqueName: \"kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.690246 4909 generic.go:334] "Generic (PLEG): container finished" podID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerID="3916421bf5eb1a4dd64705dc52aba217e0dd0f8adcc8ed2960649da6026f1340" exitCode=143 Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.690271 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerDied","Data":"3916421bf5eb1a4dd64705dc52aba217e0dd0f8adcc8ed2960649da6026f1340"} Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.797759 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.799453 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.800962 4909 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:42:58 crc kubenswrapper[4909]: E1128 18:42:58.800998 4909 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerName="nova-cell0-conductor-conductor" Nov 28 18:42:58 crc kubenswrapper[4909]: I1128 18:42:58.806256 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.275076 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.368634 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data\") pod \"520d15b5-e2af-446f-8576-3d67355807b9\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.368922 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle\") pod \"520d15b5-e2af-446f-8576-3d67355807b9\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.368975 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srgmg\" (UniqueName: \"kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg\") pod \"520d15b5-e2af-446f-8576-3d67355807b9\" (UID: \"520d15b5-e2af-446f-8576-3d67355807b9\") " Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.373346 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg" (OuterVolumeSpecName: "kube-api-access-srgmg") pod "520d15b5-e2af-446f-8576-3d67355807b9" (UID: "520d15b5-e2af-446f-8576-3d67355807b9"). InnerVolumeSpecName "kube-api-access-srgmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.401232 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d"] Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.401509 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data" (OuterVolumeSpecName: "config-data") pod "520d15b5-e2af-446f-8576-3d67355807b9" (UID: "520d15b5-e2af-446f-8576-3d67355807b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.405173 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "520d15b5-e2af-446f-8576-3d67355807b9" (UID: "520d15b5-e2af-446f-8576-3d67355807b9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.406421 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.470886 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.471447 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srgmg\" (UniqueName: \"kubernetes.io/projected/520d15b5-e2af-446f-8576-3d67355807b9-kube-api-access-srgmg\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.471461 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/520d15b5-e2af-446f-8576-3d67355807b9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.710364 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" event={"ID":"410d136c-96de-485b-a570-3b74d2d66941","Type":"ContainerStarted","Data":"4ce94c1ad7876c28c0272550356e674aaffe53310986a3d899bdc086b2f71030"} Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.712601 4909 generic.go:334] "Generic (PLEG): container finished" podID="520d15b5-e2af-446f-8576-3d67355807b9" containerID="6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b" exitCode=0 Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.712647 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"520d15b5-e2af-446f-8576-3d67355807b9","Type":"ContainerDied","Data":"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b"} Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.712687 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"520d15b5-e2af-446f-8576-3d67355807b9","Type":"ContainerDied","Data":"a6ed951d796899a8ff6652da87d7bca340f9fbdae3f3e257fda85aae79180cf1"} Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.712719 4909 scope.go:117] "RemoveContainer" containerID="6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.712842 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.736040 4909 scope.go:117] "RemoveContainer" containerID="6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b" Nov 28 18:42:59 crc kubenswrapper[4909]: E1128 18:42:59.736542 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b\": container with ID starting with 6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b not found: ID does not exist" containerID="6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.736590 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b"} err="failed to get container status \"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b\": rpc error: code = NotFound desc = could not find container \"6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b\": container with ID starting with 6e52fa75391c03ea8613f3eb1bda67c7ce7fc954edab1810b00800ccc0e6ed5b not found: ID does not exist" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.765414 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.779630 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.808574 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:42:59 crc kubenswrapper[4909]: E1128 18:42:59.809410 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="520d15b5-e2af-446f-8576-3d67355807b9" containerName="nova-cell1-conductor-conductor" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.809446 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="520d15b5-e2af-446f-8576-3d67355807b9" containerName="nova-cell1-conductor-conductor" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.809847 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="520d15b5-e2af-446f-8576-3d67355807b9" containerName="nova-cell1-conductor-conductor" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.811128 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.813202 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.827832 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.880509 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtcm8\" (UniqueName: \"kubernetes.io/projected/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-kube-api-access-mtcm8\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.880612 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.880709 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.914512 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="520d15b5-e2af-446f-8576-3d67355807b9" path="/var/lib/kubelet/pods/520d15b5-e2af-446f-8576-3d67355807b9/volumes" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.983152 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtcm8\" (UniqueName: \"kubernetes.io/projected/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-kube-api-access-mtcm8\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.983330 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.983425 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.989124 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:42:59 crc kubenswrapper[4909]: I1128 18:42:59.989250 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.005570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtcm8\" (UniqueName: \"kubernetes.io/projected/6ce5e59d-c3dd-439d-86dc-9abdeb9cc320-kube-api-access-mtcm8\") pod \"nova-cell1-conductor-0\" (UID: \"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.145246 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:43:00 crc kubenswrapper[4909]: W1128 18:43:00.649031 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ce5e59d_c3dd_439d_86dc_9abdeb9cc320.slice/crio-02792686a1ba6417424999b03a2e3eefcef4ba8f0e5e6b78efe00f3035ef783e WatchSource:0}: Error finding container 02792686a1ba6417424999b03a2e3eefcef4ba8f0e5e6b78efe00f3035ef783e: Status 404 returned error can't find the container with id 02792686a1ba6417424999b03a2e3eefcef4ba8f0e5e6b78efe00f3035ef783e Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.660085 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.733309 4909 generic.go:334] "Generic (PLEG): container finished" podID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" containerID="7502ba00a4b47f66f200769bd3bc15236c8b4f7d4b3e60240180cae56fb01d26" exitCode=0 Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.733381 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5","Type":"ContainerDied","Data":"7502ba00a4b47f66f200769bd3bc15236c8b4f7d4b3e60240180cae56fb01d26"} Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.734640 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320","Type":"ContainerStarted","Data":"02792686a1ba6417424999b03a2e3eefcef4ba8f0e5e6b78efe00f3035ef783e"} Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.736202 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" event={"ID":"410d136c-96de-485b-a570-3b74d2d66941","Type":"ContainerStarted","Data":"4fd60dfc734c8bc85c61df4dacfd8b1f1311843464b0537013fc407a47e5dc52"} Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.763744 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" podStartSLOduration=2.299899887 podStartE2EDuration="2.763718571s" podCreationTimestamp="2025-11-28 18:42:58 +0000 UTC" firstStartedPulling="2025-11-28 18:42:59.405796831 +0000 UTC m=+9161.802481355" lastFinishedPulling="2025-11-28 18:42:59.869615515 +0000 UTC m=+9162.266300039" observedRunningTime="2025-11-28 18:43:00.759291292 +0000 UTC m=+9163.155975836" watchObservedRunningTime="2025-11-28 18:43:00.763718571 +0000 UTC m=+9163.160403125" Nov 28 18:43:00 crc kubenswrapper[4909]: I1128 18:43:00.935414 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.004115 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle\") pod \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.004321 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data\") pod \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.004358 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgllc\" (UniqueName: \"kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc\") pod \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\" (UID: \"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.009154 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc" (OuterVolumeSpecName: "kube-api-access-pgllc") pod "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" (UID: "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5"). InnerVolumeSpecName "kube-api-access-pgllc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.039357 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data" (OuterVolumeSpecName: "config-data") pod "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" (UID: "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.042106 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" (UID: "d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.108785 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.108931 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgllc\" (UniqueName: \"kubernetes.io/projected/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-kube-api-access-pgllc\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.108960 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.487568 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": read tcp 10.217.0.2:41452->10.217.1.83:8775: read: connection reset by peer" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.487600 4909 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": read tcp 10.217.0.2:41456->10.217.1.83:8775: read: connection reset by peer" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.767701 4909 generic.go:334] "Generic (PLEG): container finished" podID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerID="9116defd9fe70722ad03cad59ee3e52631b206c84226f2c2088a926031e0b38c" exitCode=0 Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.767953 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerDied","Data":"9116defd9fe70722ad03cad59ee3e52631b206c84226f2c2088a926031e0b38c"} Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.796365 4909 generic.go:334] "Generic (PLEG): container finished" podID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerID="3011ae936e046f930a87e8382a2115d20b5fac41b3278ba9b19928abacbf0d1f" exitCode=0 Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.796465 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerDied","Data":"3011ae936e046f930a87e8382a2115d20b5fac41b3278ba9b19928abacbf0d1f"} Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.796495 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa","Type":"ContainerDied","Data":"c062d938c08070aaca44ee54b2bd4baeb9260c894aceb394663354832094b263"} Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.796508 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c062d938c08070aaca44ee54b2bd4baeb9260c894aceb394663354832094b263" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.804194 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.806397 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5","Type":"ContainerDied","Data":"1a5be13676a6b180c1b4dde3e4ecdbba5636b9d3e2ff47b1075335539dd0a852"} Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.806443 4909 scope.go:117] "RemoveContainer" containerID="7502ba00a4b47f66f200769bd3bc15236c8b4f7d4b3e60240180cae56fb01d26" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.812949 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6ce5e59d-c3dd-439d-86dc-9abdeb9cc320","Type":"ContainerStarted","Data":"2fcbcef693ef249781f8a1977ca604685e069af2637680e77db1aa4a3e6b463d"} Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.813916 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.834497 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.834475363 podStartE2EDuration="2.834475363s" podCreationTimestamp="2025-11-28 18:42:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:43:01.830149817 +0000 UTC m=+9164.226834351" watchObservedRunningTime="2025-11-28 18:43:01.834475363 +0000 UTC m=+9164.231159887" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.884141 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.925157 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m2kq\" (UniqueName: \"kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq\") pod \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.925266 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle\") pod \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.925303 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data\") pod \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.925614 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs\") pod \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\" (UID: \"7f0a968b-b03b-4f03-99f8-5c4cf1d70baa\") " Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.942969 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs" (OuterVolumeSpecName: "logs") pod "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" (UID: "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.949973 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq" (OuterVolumeSpecName: "kube-api-access-8m2kq") pod "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" (UID: "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa"). InnerVolumeSpecName "kube-api-access-8m2kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.954650 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.954741 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.965148 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:43:01 crc kubenswrapper[4909]: E1128 18:43:01.965756 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" containerName="nova-scheduler-scheduler" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.965774 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" containerName="nova-scheduler-scheduler" Nov 28 18:43:01 crc kubenswrapper[4909]: E1128 18:43:01.965790 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-api" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.965796 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-api" Nov 28 18:43:01 crc kubenswrapper[4909]: E1128 18:43:01.965811 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-log" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.965817 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-log" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.965999 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-log" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.966023 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" containerName="nova-scheduler-scheduler" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.966031 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" containerName="nova-api-api" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.966751 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.969741 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data" (OuterVolumeSpecName: "config-data") pod "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" (UID: "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:01 crc kubenswrapper[4909]: I1128 18:43:01.970191 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.011486 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.027459 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbdqx\" (UniqueName: \"kubernetes.io/projected/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-kube-api-access-kbdqx\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.027643 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.027820 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-config-data\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.028302 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-logs\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.028323 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m2kq\" (UniqueName: \"kubernetes.io/projected/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-kube-api-access-8m2kq\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.028334 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.130164 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbdqx\" (UniqueName: \"kubernetes.io/projected/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-kube-api-access-kbdqx\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.130356 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.130394 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-config-data\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.576504 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-config-data\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.576816 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.579811 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbdqx\" (UniqueName: \"kubernetes.io/projected/732cb966-3efa-4ba9-8bc1-ce4427b9e92b-kube-api-access-kbdqx\") pod \"nova-scheduler-0\" (UID: \"732cb966-3efa-4ba9-8bc1-ce4427b9e92b\") " pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.591636 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.609899 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" (UID: "7f0a968b-b03b-4f03-99f8-5c4cf1d70baa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.641555 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.805227 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.870987 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a2b756af-7f55-41f7-95be-4b01d0c05c51","Type":"ContainerDied","Data":"6341298ee59f56a90dcb4b2f9b687fd85b6beef35bdb7c775b09dd7594ccba7b"} Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.871053 4909 scope.go:117] "RemoveContainer" containerID="9116defd9fe70722ad03cad59ee3e52631b206c84226f2c2088a926031e0b38c" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.871122 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.889725 4909 generic.go:334] "Generic (PLEG): container finished" podID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerID="4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" exitCode=0 Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.890799 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7b8b20b0-03bc-4edf-89ef-1cad623e470e","Type":"ContainerDied","Data":"4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7"} Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.890877 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.951868 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs\") pod \"a2b756af-7f55-41f7-95be-4b01d0c05c51\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.952046 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz6mg\" (UniqueName: \"kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg\") pod \"a2b756af-7f55-41f7-95be-4b01d0c05c51\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.952114 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle\") pod \"a2b756af-7f55-41f7-95be-4b01d0c05c51\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.952196 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data\") pod \"a2b756af-7f55-41f7-95be-4b01d0c05c51\" (UID: \"a2b756af-7f55-41f7-95be-4b01d0c05c51\") " Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.958321 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.963257 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs" (OuterVolumeSpecName: "logs") pod "a2b756af-7f55-41f7-95be-4b01d0c05c51" (UID: "a2b756af-7f55-41f7-95be-4b01d0c05c51"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.968345 4909 scope.go:117] "RemoveContainer" containerID="cf563c6f57ef2c67b0f416db42c68c8a696cf8b010ccf61af5cd491c594466f4" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.969388 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg" (OuterVolumeSpecName: "kube-api-access-hz6mg") pod "a2b756af-7f55-41f7-95be-4b01d0c05c51" (UID: "a2b756af-7f55-41f7-95be-4b01d0c05c51"). InnerVolumeSpecName "kube-api-access-hz6mg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.970404 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.984020 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 18:43:02 crc kubenswrapper[4909]: E1128 18:43:02.984730 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.984744 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" Nov 28 18:43:02 crc kubenswrapper[4909]: E1128 18:43:02.984761 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.984767 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.985001 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-metadata" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.985022 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" containerName="nova-metadata-log" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.986333 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.988028 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 18:43:02 crc kubenswrapper[4909]: I1128 18:43:02.993427 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.017721 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2b756af-7f55-41f7-95be-4b01d0c05c51" (UID: "a2b756af-7f55-41f7-95be-4b01d0c05c51"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.058798 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.058833 4909 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b756af-7f55-41f7-95be-4b01d0c05c51-logs\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.058849 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz6mg\" (UniqueName: \"kubernetes.io/projected/a2b756af-7f55-41f7-95be-4b01d0c05c51-kube-api-access-hz6mg\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: W1128 18:43:03.069915 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod732cb966_3efa_4ba9_8bc1_ce4427b9e92b.slice/crio-fa4875d62ec4f9137186ce946c6a7b1114ed9709557b3e7531562bbdf3a55f4c WatchSource:0}: Error finding container fa4875d62ec4f9137186ce946c6a7b1114ed9709557b3e7531562bbdf3a55f4c: Status 404 returned error can't find the container with id fa4875d62ec4f9137186ce946c6a7b1114ed9709557b3e7531562bbdf3a55f4c Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.072306 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.160178 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-config-data\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.160443 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.160535 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-logs\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.160552 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sj7s\" (UniqueName: \"kubernetes.io/projected/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-kube-api-access-9sj7s\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.179966 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data" (OuterVolumeSpecName: "config-data") pod "a2b756af-7f55-41f7-95be-4b01d0c05c51" (UID: "a2b756af-7f55-41f7-95be-4b01d0c05c51"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.259943 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.262000 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-logs\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.262031 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sj7s\" (UniqueName: \"kubernetes.io/projected/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-kube-api-access-9sj7s\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.262138 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-config-data\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.262154 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.262259 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b756af-7f55-41f7-95be-4b01d0c05c51-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.263203 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-logs\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.266374 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.266998 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-config-data\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.285147 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sj7s\" (UniqueName: \"kubernetes.io/projected/e96a498d-55ea-45ba-8bf3-2ab66ba99d0a-kube-api-access-9sj7s\") pod \"nova-api-0\" (UID: \"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a\") " pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.363544 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx6pm\" (UniqueName: \"kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm\") pod 
\"7b8b20b0-03bc-4edf-89ef-1cad623e470e\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.363731 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle\") pod \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.363907 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data\") pod \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\" (UID: \"7b8b20b0-03bc-4edf-89ef-1cad623e470e\") " Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.369879 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm" (OuterVolumeSpecName: "kube-api-access-lx6pm") pod "7b8b20b0-03bc-4edf-89ef-1cad623e470e" (UID: "7b8b20b0-03bc-4edf-89ef-1cad623e470e"). InnerVolumeSpecName "kube-api-access-lx6pm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.402788 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data" (OuterVolumeSpecName: "config-data") pod "7b8b20b0-03bc-4edf-89ef-1cad623e470e" (UID: "7b8b20b0-03bc-4edf-89ef-1cad623e470e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.410439 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b8b20b0-03bc-4edf-89ef-1cad623e470e" (UID: "7b8b20b0-03bc-4edf-89ef-1cad623e470e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.466059 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx6pm\" (UniqueName: \"kubernetes.io/projected/7b8b20b0-03bc-4edf-89ef-1cad623e470e-kube-api-access-lx6pm\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.466096 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.466106 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8b20b0-03bc-4edf-89ef-1cad623e470e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.504572 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.514810 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.528958 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: E1128 18:43:03.529392 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerName="nova-cell0-conductor-conductor" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.529411 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerName="nova-cell0-conductor-conductor" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.529690 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" containerName="nova-cell0-conductor-conductor" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.530910 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.532833 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.559763 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.605769 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.678716 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1184d3c2-eb34-4da7-9f6b-39789cd507e4-logs\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.678784 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9xc2\" (UniqueName: \"kubernetes.io/projected/1184d3c2-eb34-4da7-9f6b-39789cd507e4-kube-api-access-f9xc2\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.678847 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-config-data\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.678874 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.780481 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-config-data\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.780527 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.780700 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1184d3c2-eb34-4da7-9f6b-39789cd507e4-logs\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.780732 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9xc2\" (UniqueName: \"kubernetes.io/projected/1184d3c2-eb34-4da7-9f6b-39789cd507e4-kube-api-access-f9xc2\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.781845 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1184d3c2-eb34-4da7-9f6b-39789cd507e4-logs\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 
18:43:03.784937 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-config-data\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.786871 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1184d3c2-eb34-4da7-9f6b-39789cd507e4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.800584 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9xc2\" (UniqueName: \"kubernetes.io/projected/1184d3c2-eb34-4da7-9f6b-39789cd507e4-kube-api-access-f9xc2\") pod \"nova-metadata-0\" (UID: \"1184d3c2-eb34-4da7-9f6b-39789cd507e4\") " pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.848201 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.912015 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.936696 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f0a968b-b03b-4f03-99f8-5c4cf1d70baa" path="/var/lib/kubelet/pods/7f0a968b-b03b-4f03-99f8-5c4cf1d70baa/volumes" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.938642 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2b756af-7f55-41f7-95be-4b01d0c05c51" path="/var/lib/kubelet/pods/a2b756af-7f55-41f7-95be-4b01d0c05c51/volumes" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.940173 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5" path="/var/lib/kubelet/pods/d447854c-9bc0-4c9e-b1b8-7c9b1cb549a5/volumes" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.942302 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7b8b20b0-03bc-4edf-89ef-1cad623e470e","Type":"ContainerDied","Data":"208bf8958ecb13ba4b5e9918198422fa986f23b894d040111cb869736248a550"} Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.942340 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"732cb966-3efa-4ba9-8bc1-ce4427b9e92b","Type":"ContainerStarted","Data":"9d5f0cef196df7313d1ecf9c1a6694e21486d76c8dcda41d7902ca0279729389"} Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.942363 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"732cb966-3efa-4ba9-8bc1-ce4427b9e92b","Type":"ContainerStarted","Data":"fa4875d62ec4f9137186ce946c6a7b1114ed9709557b3e7531562bbdf3a55f4c"} Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.942393 4909 scope.go:117] "RemoveContainer" containerID="4497815e4da4961b57d051aeed3d768d65bc3e75563cc5e62034a3ca89cc32e7" Nov 28 18:43:03 crc kubenswrapper[4909]: I1128 18:43:03.985371 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.000162 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 
18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.021131 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.022611 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.022722 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.022704893 podStartE2EDuration="3.022704893s" podCreationTimestamp="2025-11-28 18:43:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:43:03.979906932 +0000 UTC m=+9166.376591476" watchObservedRunningTime="2025-11-28 18:43:04.022704893 +0000 UTC m=+9166.419389417" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.024738 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.050436 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.088614 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.190446 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.190585 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.190667 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7n54\" (UniqueName: \"kubernetes.io/projected/1b4b7190-2021-4a98-b7a6-f9b051e04f36-kube-api-access-l7n54\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.292733 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7n54\" (UniqueName: \"kubernetes.io/projected/1b4b7190-2021-4a98-b7a6-f9b051e04f36-kube-api-access-l7n54\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.292843 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.292996 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.410484 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.575570 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7n54\" (UniqueName: \"kubernetes.io/projected/1b4b7190-2021-4a98-b7a6-f9b051e04f36-kube-api-access-l7n54\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.575909 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.576622 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4b7190-2021-4a98-b7a6-f9b051e04f36-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1b4b7190-2021-4a98-b7a6-f9b051e04f36\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: W1128 18:43:04.578586 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode96a498d_55ea_45ba_8bf3_2ab66ba99d0a.slice/crio-9d9f4c919e06ee916414b6137ec6475ee3494406f2caffa5477d30fd8e71ebda WatchSource:0}: Error finding container 9d9f4c919e06ee916414b6137ec6475ee3494406f2caffa5477d30fd8e71ebda: Status 404 returned error can't find the container with id 9d9f4c919e06ee916414b6137ec6475ee3494406f2caffa5477d30fd8e71ebda Nov 28 18:43:04 crc kubenswrapper[4909]: W1128 18:43:04.581475 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1184d3c2_eb34_4da7_9f6b_39789cd507e4.slice/crio-0faa8331aa71665397b9fc3513dc2fc23c6b2cc6a092753dae47ddbe8f65d79e WatchSource:0}: Error finding container 0faa8331aa71665397b9fc3513dc2fc23c6b2cc6a092753dae47ddbe8f65d79e: Status 404 returned error can't find the container with id 0faa8331aa71665397b9fc3513dc2fc23c6b2cc6a092753dae47ddbe8f65d79e Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.644560 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.965368 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1184d3c2-eb34-4da7-9f6b-39789cd507e4","Type":"ContainerStarted","Data":"0faa8331aa71665397b9fc3513dc2fc23c6b2cc6a092753dae47ddbe8f65d79e"} Nov 28 18:43:04 crc kubenswrapper[4909]: I1128 18:43:04.966887 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a","Type":"ContainerStarted","Data":"9d9f4c919e06ee916414b6137ec6475ee3494406f2caffa5477d30fd8e71ebda"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.200141 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.274039 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.922209 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b8b20b0-03bc-4edf-89ef-1cad623e470e" path="/var/lib/kubelet/pods/7b8b20b0-03bc-4edf-89ef-1cad623e470e/volumes" Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.979119 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1184d3c2-eb34-4da7-9f6b-39789cd507e4","Type":"ContainerStarted","Data":"3dc60b4261ef8ecb6d81efedf1fc6b0a0f3ccd462668b97148022dd2167333b4"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.979163 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1184d3c2-eb34-4da7-9f6b-39789cd507e4","Type":"ContainerStarted","Data":"fe1603d602dcdc00c8fe3144fb95c6b66cc087e3ec8a19ff61c34bcb4490acf6"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.981130 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a","Type":"ContainerStarted","Data":"3ad534470451c68ae9974f09c15cf6a8b8c3234f24b35195166c4b8b1a05daf6"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.981331 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e96a498d-55ea-45ba-8bf3-2ab66ba99d0a","Type":"ContainerStarted","Data":"30500848f3bcb16c8be2c94e3e5dd291ca97d8976a62668ce13b31d1d50e60dd"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.983527 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1b4b7190-2021-4a98-b7a6-f9b051e04f36","Type":"ContainerStarted","Data":"8ac98394e1e06775f3ba9e370e3c3ce9f67b17e2a9c08d2ec87d1565b665b692"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.983552 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1b4b7190-2021-4a98-b7a6-f9b051e04f36","Type":"ContainerStarted","Data":"7c514f42c544c4a97b3fb5d24231413941f0e68fc97000e5df3be3c3db3f93ab"} Nov 28 18:43:05 crc kubenswrapper[4909]: I1128 18:43:05.983675 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:06 crc kubenswrapper[4909]: I1128 18:43:06.008274 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.008254079 podStartE2EDuration="3.008254079s" podCreationTimestamp="2025-11-28 18:43:03 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:43:05.997831419 +0000 UTC m=+9168.394515943" watchObservedRunningTime="2025-11-28 18:43:06.008254079 +0000 UTC m=+9168.404938603" Nov 28 18:43:06 crc kubenswrapper[4909]: I1128 18:43:06.025293 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.025250617 podStartE2EDuration="3.025250617s" podCreationTimestamp="2025-11-28 18:43:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:43:06.01348944 +0000 UTC m=+9168.410173964" watchObservedRunningTime="2025-11-28 18:43:06.025250617 +0000 UTC m=+9168.421935131" Nov 28 18:43:06 crc kubenswrapper[4909]: I1128 18:43:06.037421 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.037397304 podStartE2EDuration="4.037397304s" podCreationTimestamp="2025-11-28 18:43:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:43:06.029360837 +0000 UTC m=+9168.426045381" watchObservedRunningTime="2025-11-28 18:43:06.037397304 +0000 UTC m=+9168.434081828" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.329880 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.333695 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.353555 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.482537 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.482711 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.482800 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9jrl\" (UniqueName: \"kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.584647 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 
18:43:07.584779 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9jrl\" (UniqueName: \"kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.584914 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.585223 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.585369 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.592223 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.607791 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9jrl\" (UniqueName: \"kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl\") pod \"redhat-operators-znbzg\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:07 crc kubenswrapper[4909]: I1128 18:43:07.659956 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:08 crc kubenswrapper[4909]: I1128 18:43:08.181403 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:08 crc kubenswrapper[4909]: W1128 18:43:08.183981 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf753e5c8_e102_4829_bcb0_6f8b10aab661.slice/crio-906bed9910cfdb33bffaef6b7dfe7f526a200df90fb30f2f9d735a225f29980d WatchSource:0}: Error finding container 906bed9910cfdb33bffaef6b7dfe7f526a200df90fb30f2f9d735a225f29980d: Status 404 returned error can't find the container with id 906bed9910cfdb33bffaef6b7dfe7f526a200df90fb30f2f9d735a225f29980d Nov 28 18:43:08 crc kubenswrapper[4909]: I1128 18:43:08.848580 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 18:43:08 crc kubenswrapper[4909]: I1128 18:43:08.849792 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 18:43:09 crc kubenswrapper[4909]: I1128 18:43:09.022304 4909 generic.go:334] "Generic (PLEG): container finished" podID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerID="38e83e4d59635ece9759d5f4c9ea1f6fe25fd57029efaab5cbcc3ac6303a71b3" exitCode=0 Nov 28 18:43:09 crc kubenswrapper[4909]: I1128 18:43:09.022449 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerDied","Data":"38e83e4d59635ece9759d5f4c9ea1f6fe25fd57029efaab5cbcc3ac6303a71b3"} Nov 28 18:43:09 crc kubenswrapper[4909]: I1128 18:43:09.022507 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerStarted","Data":"906bed9910cfdb33bffaef6b7dfe7f526a200df90fb30f2f9d735a225f29980d"} Nov 28 18:43:09 crc kubenswrapper[4909]: I1128 18:43:09.901989 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:43:09 crc kubenswrapper[4909]: E1128 18:43:09.902573 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:43:11 crc kubenswrapper[4909]: I1128 18:43:11.042393 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerStarted","Data":"7709b9f4148032a95c21e5ad1253775cc963bc256dcfa6536348fc637b7e82d2"} Nov 28 18:43:12 crc kubenswrapper[4909]: I1128 18:43:12.592837 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 18:43:12 crc kubenswrapper[4909]: I1128 18:43:12.649724 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.070056 4909 generic.go:334] "Generic (PLEG): container finished" podID="f753e5c8-e102-4829-bcb0-6f8b10aab661" 
containerID="7709b9f4148032a95c21e5ad1253775cc963bc256dcfa6536348fc637b7e82d2" exitCode=0 Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.070144 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerDied","Data":"7709b9f4148032a95c21e5ad1253775cc963bc256dcfa6536348fc637b7e82d2"} Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.115521 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.559913 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.559959 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.849906 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 18:43:13 crc kubenswrapper[4909]: I1128 18:43:13.851972 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 18:43:14 crc kubenswrapper[4909]: I1128 18:43:14.644864 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e96a498d-55ea-45ba-8bf3-2ab66ba99d0a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:43:14 crc kubenswrapper[4909]: I1128 18:43:14.644960 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e96a498d-55ea-45ba-8bf3-2ab66ba99d0a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:43:14 crc kubenswrapper[4909]: I1128 18:43:14.931901 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1184d3c2-eb34-4da7-9f6b-39789cd507e4" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.195:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:43:14 crc kubenswrapper[4909]: I1128 18:43:14.932385 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1184d3c2-eb34-4da7-9f6b-39789cd507e4" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.195:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:43:15 crc kubenswrapper[4909]: I1128 18:43:15.114108 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerStarted","Data":"7d9e71dc27eead4963481f07ef0815e48b8ab496f16a115881bc17042020e172"} Nov 28 18:43:15 crc kubenswrapper[4909]: I1128 18:43:15.163757 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 18:43:15 crc kubenswrapper[4909]: I1128 18:43:15.192926 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-znbzg" podStartSLOduration=3.324441558 podStartE2EDuration="8.192909132s" podCreationTimestamp="2025-11-28 18:43:07 +0000 UTC" 
firstStartedPulling="2025-11-28 18:43:09.024390055 +0000 UTC m=+9171.421074569" lastFinishedPulling="2025-11-28 18:43:13.892857609 +0000 UTC m=+9176.289542143" observedRunningTime="2025-11-28 18:43:15.135683072 +0000 UTC m=+9177.532367596" watchObservedRunningTime="2025-11-28 18:43:15.192909132 +0000 UTC m=+9177.589593656" Nov 28 18:43:17 crc kubenswrapper[4909]: I1128 18:43:17.660821 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:17 crc kubenswrapper[4909]: I1128 18:43:17.662696 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:18 crc kubenswrapper[4909]: I1128 18:43:18.712798 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-znbzg" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="registry-server" probeResult="failure" output=< Nov 28 18:43:18 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 18:43:18 crc kubenswrapper[4909]: > Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.566307 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.567265 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.568060 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.568523 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.580095 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 18:43:23 crc kubenswrapper[4909]: I1128 18:43:23.581527 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 18:43:24 crc kubenswrapper[4909]: I1128 18:43:24.755926 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 18:43:24 crc kubenswrapper[4909]: I1128 18:43:24.756308 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 18:43:24 crc kubenswrapper[4909]: I1128 18:43:24.768956 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 18:43:24 crc kubenswrapper[4909]: I1128 18:43:24.769061 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 18:43:24 crc kubenswrapper[4909]: I1128 18:43:24.902132 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:43:25 crc kubenswrapper[4909]: I1128 18:43:25.744262 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9"} Nov 28 18:43:27 crc kubenswrapper[4909]: I1128 18:43:27.718761 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:27 crc kubenswrapper[4909]: I1128 18:43:27.790160 4909 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:27 crc kubenswrapper[4909]: I1128 18:43:27.964182 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:28 crc kubenswrapper[4909]: I1128 18:43:28.777520 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-znbzg" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="registry-server" containerID="cri-o://7d9e71dc27eead4963481f07ef0815e48b8ab496f16a115881bc17042020e172" gracePeriod=2 Nov 28 18:43:29 crc kubenswrapper[4909]: I1128 18:43:29.792842 4909 generic.go:334] "Generic (PLEG): container finished" podID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerID="7d9e71dc27eead4963481f07ef0815e48b8ab496f16a115881bc17042020e172" exitCode=0 Nov 28 18:43:29 crc kubenswrapper[4909]: I1128 18:43:29.792874 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerDied","Data":"7d9e71dc27eead4963481f07ef0815e48b8ab496f16a115881bc17042020e172"} Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.387176 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.532781 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content\") pod \"f753e5c8-e102-4829-bcb0-6f8b10aab661\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.533049 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9jrl\" (UniqueName: \"kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl\") pod \"f753e5c8-e102-4829-bcb0-6f8b10aab661\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.533132 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities\") pod \"f753e5c8-e102-4829-bcb0-6f8b10aab661\" (UID: \"f753e5c8-e102-4829-bcb0-6f8b10aab661\") " Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.533816 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities" (OuterVolumeSpecName: "utilities") pod "f753e5c8-e102-4829-bcb0-6f8b10aab661" (UID: "f753e5c8-e102-4829-bcb0-6f8b10aab661"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.534150 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.540959 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl" (OuterVolumeSpecName: "kube-api-access-t9jrl") pod "f753e5c8-e102-4829-bcb0-6f8b10aab661" (UID: "f753e5c8-e102-4829-bcb0-6f8b10aab661"). 
InnerVolumeSpecName "kube-api-access-t9jrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.636017 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9jrl\" (UniqueName: \"kubernetes.io/projected/f753e5c8-e102-4829-bcb0-6f8b10aab661-kube-api-access-t9jrl\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.663747 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f753e5c8-e102-4829-bcb0-6f8b10aab661" (UID: "f753e5c8-e102-4829-bcb0-6f8b10aab661"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.739123 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f753e5c8-e102-4829-bcb0-6f8b10aab661-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.810290 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znbzg" event={"ID":"f753e5c8-e102-4829-bcb0-6f8b10aab661","Type":"ContainerDied","Data":"906bed9910cfdb33bffaef6b7dfe7f526a200df90fb30f2f9d735a225f29980d"} Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.810328 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znbzg" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.810370 4909 scope.go:117] "RemoveContainer" containerID="7d9e71dc27eead4963481f07ef0815e48b8ab496f16a115881bc17042020e172" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.857943 4909 scope.go:117] "RemoveContainer" containerID="7709b9f4148032a95c21e5ad1253775cc963bc256dcfa6536348fc637b7e82d2" Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.859646 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.870679 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-znbzg"] Nov 28 18:43:30 crc kubenswrapper[4909]: I1128 18:43:30.888985 4909 scope.go:117] "RemoveContainer" containerID="38e83e4d59635ece9759d5f4c9ea1f6fe25fd57029efaab5cbcc3ac6303a71b3" Nov 28 18:43:31 crc kubenswrapper[4909]: I1128 18:43:31.919422 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" path="/var/lib/kubelet/pods/f753e5c8-e102-4829-bcb0-6f8b10aab661/volumes" Nov 28 18:43:39 crc kubenswrapper[4909]: I1128 18:43:39.216641 4909 scope.go:117] "RemoveContainer" containerID="3011ae936e046f930a87e8382a2115d20b5fac41b3278ba9b19928abacbf0d1f" Nov 28 18:43:39 crc kubenswrapper[4909]: I1128 18:43:39.257244 4909 scope.go:117] "RemoveContainer" containerID="3916421bf5eb1a4dd64705dc52aba217e0dd0f8adcc8ed2960649da6026f1340" Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.151289 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"] Nov 28 18:45:00 crc kubenswrapper[4909]: E1128 18:45:00.152610 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="registry-server" Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 
Nov 28 18:45:00 crc kubenswrapper[4909]: E1128 18:45:00.152670 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="extract-utilities"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.152680 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="extract-utilities"
Nov 28 18:45:00 crc kubenswrapper[4909]: E1128 18:45:00.152728 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="extract-content"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.152737 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="extract-content"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.153034 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f753e5c8-e102-4829-bcb0-6f8b10aab661" containerName="registry-server"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.153978 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.175105 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.176718 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.196408 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"]
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.288143 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.288324 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9gwr\" (UniqueName: \"kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.288527 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.391725 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.392066 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9gwr\" (UniqueName: \"kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.392245 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.393847 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.400095 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.409140 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9gwr\" (UniqueName: \"kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr\") pod \"collect-profiles-29405925-lltxn\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.483917 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:00 crc kubenswrapper[4909]: I1128 18:45:00.956864 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"]
Nov 28 18:45:01 crc kubenswrapper[4909]: I1128 18:45:01.884174 4909 generic.go:334] "Generic (PLEG): container finished" podID="73469313-1bf1-4a36-b123-53760f6ce73c" containerID="a62f712d88509972f3f173d204bae429c6ca8dddc3cec93bad32e9a9764c163f" exitCode=0
Nov 28 18:45:01 crc kubenswrapper[4909]: I1128 18:45:01.884273 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn" event={"ID":"73469313-1bf1-4a36-b123-53760f6ce73c","Type":"ContainerDied","Data":"a62f712d88509972f3f173d204bae429c6ca8dddc3cec93bad32e9a9764c163f"}
Nov 28 18:45:01 crc kubenswrapper[4909]: I1128 18:45:01.884532 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn" event={"ID":"73469313-1bf1-4a36-b123-53760f6ce73c","Type":"ContainerStarted","Data":"1e29182c9c10d9290e71efb588e1fb270cb29f8c71cf79232ddb0562196a14b4"}
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.328542 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.478735 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume\") pod \"73469313-1bf1-4a36-b123-53760f6ce73c\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") "
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.478901 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9gwr\" (UniqueName: \"kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr\") pod \"73469313-1bf1-4a36-b123-53760f6ce73c\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") "
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.478977 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume\") pod \"73469313-1bf1-4a36-b123-53760f6ce73c\" (UID: \"73469313-1bf1-4a36-b123-53760f6ce73c\") "
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.480637 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume" (OuterVolumeSpecName: "config-volume") pod "73469313-1bf1-4a36-b123-53760f6ce73c" (UID: "73469313-1bf1-4a36-b123-53760f6ce73c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.487277 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr" (OuterVolumeSpecName: "kube-api-access-q9gwr") pod "73469313-1bf1-4a36-b123-53760f6ce73c" (UID: "73469313-1bf1-4a36-b123-53760f6ce73c"). InnerVolumeSpecName "kube-api-access-q9gwr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.487919 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "73469313-1bf1-4a36-b123-53760f6ce73c" (UID: "73469313-1bf1-4a36-b123-53760f6ce73c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.581731 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9gwr\" (UniqueName: \"kubernetes.io/projected/73469313-1bf1-4a36-b123-53760f6ce73c-kube-api-access-q9gwr\") on node \"crc\" DevicePath \"\""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.581774 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/73469313-1bf1-4a36-b123-53760f6ce73c-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.581789 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73469313-1bf1-4a36-b123-53760f6ce73c-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.907817 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn"
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.914046 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405925-lltxn" event={"ID":"73469313-1bf1-4a36-b123-53760f6ce73c","Type":"ContainerDied","Data":"1e29182c9c10d9290e71efb588e1fb270cb29f8c71cf79232ddb0562196a14b4"}
Nov 28 18:45:03 crc kubenswrapper[4909]: I1128 18:45:03.914086 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e29182c9c10d9290e71efb588e1fb270cb29f8c71cf79232ddb0562196a14b4"
Nov 28 18:45:04 crc kubenswrapper[4909]: I1128 18:45:04.442442 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7"]
Nov 28 18:45:04 crc kubenswrapper[4909]: I1128 18:45:04.452532 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-fv5k7"]
Nov 28 18:45:05 crc kubenswrapper[4909]: I1128 18:45:05.922872 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fd47769-1328-495f-aa2d-3e15a9df114d" path="/var/lib/kubelet/pods/0fd47769-1328-495f-aa2d-3e15a9df114d/volumes"
Nov 28 18:45:40 crc kubenswrapper[4909]: I1128 18:45:40.012997 4909 scope.go:117] "RemoveContainer" containerID="b26a761f7f99192d97c3db332338c7cb0e6fa7201dcb4944384a020921cdcc36"
Nov 28 18:45:49 crc kubenswrapper[4909]: I1128 18:45:49.911446 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 18:45:49 crc kubenswrapper[4909]: I1128 18:45:49.912088 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:46:19 crc kubenswrapper[4909]: I1128 18:46:19.911062 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:46:19 crc kubenswrapper[4909]: I1128 18:46:19.912818 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:46:49 crc kubenswrapper[4909]: I1128 18:46:49.910901 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:46:49 crc kubenswrapper[4909]: I1128 18:46:49.911560 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:46:49 crc kubenswrapper[4909]: I1128 18:46:49.921452 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:46:49 crc kubenswrapper[4909]: I1128 18:46:49.922932 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:46:49 crc kubenswrapper[4909]: I1128 18:46:49.923050 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9" gracePeriod=600 Nov 28 18:46:50 crc kubenswrapper[4909]: I1128 18:46:50.197299 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9" exitCode=0 Nov 28 18:46:50 crc kubenswrapper[4909]: I1128 18:46:50.197956 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9"} Nov 28 18:46:50 crc kubenswrapper[4909]: I1128 18:46:50.198079 4909 scope.go:117] "RemoveContainer" containerID="ef2a38674be7675e726f15b879fd9bbea7be679d566ac302dd10e3d3ce61bcc2" Nov 28 18:46:51 crc kubenswrapper[4909]: I1128 18:46:51.211600 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495"} Nov 28 18:48:29 crc kubenswrapper[4909]: I1128 18:48:29.371139 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" event={"ID":"410d136c-96de-485b-a570-3b74d2d66941","Type":"ContainerDied","Data":"4fd60dfc734c8bc85c61df4dacfd8b1f1311843464b0537013fc407a47e5dc52"} Nov 28 18:48:29 crc kubenswrapper[4909]: I1128 18:48:29.371223 4909 generic.go:334] "Generic (PLEG): container finished" podID="410d136c-96de-485b-a570-3b74d2d66941" containerID="4fd60dfc734c8bc85c61df4dacfd8b1f1311843464b0537013fc407a47e5dc52" exitCode=0 Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.054740 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.201836 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202228 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202261 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202284 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202320 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202372 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202408 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: 
\"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202714 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202763 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202780 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ch9g\" (UniqueName: \"kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.202828 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle\") pod \"410d136c-96de-485b-a570-3b74d2d66941\" (UID: \"410d136c-96de-485b-a570-3b74d2d66941\") " Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.208061 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph" (OuterVolumeSpecName: "ceph") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.208189 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.208585 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g" (OuterVolumeSpecName: "kube-api-access-4ch9g") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "kube-api-access-4ch9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.233944 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-cells-global-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.236015 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.236315 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.241643 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.246253 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.247142 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.255932 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory" (OuterVolumeSpecName: "inventory") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.271696 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "410d136c-96de-485b-a570-3b74d2d66941" (UID: "410d136c-96de-485b-a570-3b74d2d66941"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305331 4909 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305555 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ch9g\" (UniqueName: \"kubernetes.io/projected/410d136c-96de-485b-a570-3b74d2d66941-kube-api-access-4ch9g\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305615 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305688 4909 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305770 4909 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305843 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305898 4909 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.305987 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.306042 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/410d136c-96de-485b-a570-3b74d2d66941-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.306095 4909 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.306155 4909 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/410d136c-96de-485b-a570-3b74d2d66941-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.397355 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d" event={"ID":"410d136c-96de-485b-a570-3b74d2d66941","Type":"ContainerDied","Data":"4ce94c1ad7876c28c0272550356e674aaffe53310986a3d899bdc086b2f71030"} Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.397397 4909 
Nov 28 18:48:31 crc kubenswrapper[4909]: I1128 18:48:31.397643 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d"
Nov 28 18:49:19 crc kubenswrapper[4909]: I1128 18:49:19.911498 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 18:49:19 crc kubenswrapper[4909]: I1128 18:49:19.912212 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.806051 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"]
Nov 28 18:49:32 crc kubenswrapper[4909]: E1128 18:49:32.807441 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="410d136c-96de-485b-a570-3b74d2d66941" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.807463 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="410d136c-96de-485b-a570-3b74d2d66941" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 18:49:32 crc kubenswrapper[4909]: E1128 18:49:32.807511 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73469313-1bf1-4a36-b123-53760f6ce73c" containerName="collect-profiles"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.807522 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="73469313-1bf1-4a36-b123-53760f6ce73c" containerName="collect-profiles"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.807813 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="73469313-1bf1-4a36-b123-53760f6ce73c" containerName="collect-profiles"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.807855 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="410d136c-96de-485b-a570-3b74d2d66941" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.810007 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.824478 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"]
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.912675 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.912768 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.912833 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnfjq\" (UniqueName: \"kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.986094 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m54tx"]
Nov 28 18:49:32 crc kubenswrapper[4909]: I1128 18:49:32.988559 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.005344 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m54tx"]
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.015123 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.015451 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.015646 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnfjq\" (UniqueName: \"kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.016617 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.017022 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.057797 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnfjq\" (UniqueName: \"kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq\") pod \"certified-operators-rc2tj\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.117732 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.118201 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xn57\" (UniqueName: \"kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.118417 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.134130 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.220097 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xn57\" (UniqueName: \"kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.220218 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.220289 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.220773 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.220825 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.247398 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xn57\" (UniqueName: \"kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57\") pod \"community-operators-m54tx\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.308880 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.752438 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"]
Nov 28 18:49:33 crc kubenswrapper[4909]: W1128 18:49:33.759310 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd3ec630_0e05_436e_a6f6_fb069bb5a3d5.slice/crio-1e0f277851b7ca2d23c9fb4ebbb06a704b7bdfa89d9eaf6dee2f354e09e6b546 WatchSource:0}: Error finding container 1e0f277851b7ca2d23c9fb4ebbb06a704b7bdfa89d9eaf6dee2f354e09e6b546: Status 404 returned error can't find the container with id 1e0f277851b7ca2d23c9fb4ebbb06a704b7bdfa89d9eaf6dee2f354e09e6b546
Nov 28 18:49:33 crc kubenswrapper[4909]: I1128 18:49:33.928723 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m54tx"]
Nov 28 18:49:34 crc kubenswrapper[4909]: I1128 18:49:34.120836 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerStarted","Data":"1e0f277851b7ca2d23c9fb4ebbb06a704b7bdfa89d9eaf6dee2f354e09e6b546"}
Nov 28 18:49:35 crc kubenswrapper[4909]: I1128 18:49:35.158461 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerStarted","Data":"efd2bfa29df4d4a5f00bad4a3340943a50b5b9223f082ca14db90e2b7d6bed45"}
Nov 28 18:49:36 crc kubenswrapper[4909]: I1128 18:49:36.170710 4909 generic.go:334] "Generic (PLEG): container finished" podID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerID="ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66" exitCode=0
Nov 28 18:49:36 crc kubenswrapper[4909]: I1128 18:49:36.170886 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerDied","Data":"ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66"}
Nov 28 18:49:36 crc kubenswrapper[4909]: I1128 18:49:36.174579 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 18:49:36 crc kubenswrapper[4909]: I1128 18:49:36.177066 4909 generic.go:334] "Generic (PLEG): container finished" podID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerID="bd7bda477d74753df702dc8297ecff0b30618c0c736c1f1dd0ccaa50e9cb9fe4" exitCode=0
Nov 28 18:49:36 crc kubenswrapper[4909]: I1128 18:49:36.177107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerDied","Data":"bd7bda477d74753df702dc8297ecff0b30618c0c736c1f1dd0ccaa50e9cb9fe4"}
Nov 28 18:49:38 crc kubenswrapper[4909]: I1128 18:49:38.198636 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerStarted","Data":"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38"}
Nov 28 18:49:38 crc kubenswrapper[4909]: I1128 18:49:38.201839 4909 generic.go:334] "Generic (PLEG): container finished" podID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerID="119417e62b442ee93ab989a8f379e9599bffdf778da0e8b5aab0a2cb67744e4f" exitCode=0
Nov 28 18:49:38 crc kubenswrapper[4909]: I1128 18:49:38.201944 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerDied","Data":"119417e62b442ee93ab989a8f379e9599bffdf778da0e8b5aab0a2cb67744e4f"}
Nov 28 18:49:39 crc kubenswrapper[4909]: I1128 18:49:39.221176 4909 generic.go:334] "Generic (PLEG): container finished" podID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerID="ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38" exitCode=0
Nov 28 18:49:39 crc kubenswrapper[4909]: I1128 18:49:39.221637 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerDied","Data":"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38"}
Nov 28 18:49:39 crc kubenswrapper[4909]: I1128 18:49:39.228188 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerStarted","Data":"f076208443f7a4d22eb586f3b6350016123bb75e3683a02849185a94f28ea5f4"}
Nov 28 18:49:39 crc kubenswrapper[4909]: I1128 18:49:39.268754 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m54tx" podStartSLOduration=4.606972597 podStartE2EDuration="7.268735052s" podCreationTimestamp="2025-11-28 18:49:32 +0000 UTC" firstStartedPulling="2025-11-28 18:49:36.179074528 +0000 UTC m=+9558.575759062" lastFinishedPulling="2025-11-28 18:49:38.840836973 +0000 UTC m=+9561.237521517" observedRunningTime="2025-11-28 18:49:39.26786435 +0000 UTC m=+9561.664548894" watchObservedRunningTime="2025-11-28 18:49:39.268735052 +0000 UTC m=+9561.665419586"
Nov 28 18:49:41 crc kubenswrapper[4909]: I1128 18:49:41.249716 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerStarted","Data":"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d"}
Nov 28 18:49:41 crc kubenswrapper[4909]: I1128 18:49:41.272130 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rc2tj" podStartSLOduration=4.944204968 podStartE2EDuration="9.272112259s" podCreationTimestamp="2025-11-28 18:49:32 +0000 UTC" firstStartedPulling="2025-11-28 18:49:36.174308733 +0000 UTC m=+9558.570993257" lastFinishedPulling="2025-11-28 18:49:40.502216024 +0000 UTC m=+9562.898900548" observedRunningTime="2025-11-28 18:49:41.26833156 +0000 UTC m=+9563.665016084" watchObservedRunningTime="2025-11-28 18:49:41.272112259 +0000 UTC m=+9563.668796783"
Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.135072 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.135720 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.220182 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rc2tj"
Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.309109 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m54tx"
Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.309154 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m54tx"
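
The "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs that dominate this section come from the pod lifecycle event generator: it periodically relists container states from the runtime, diffs them against the previous snapshot, and turns the differences into ContainerStarted/ContainerDied events for the sync loop. A toy Go sketch of that relist-and-diff idea (types and names are illustrative only; the IDs are shortened from the log):

package main

import "fmt"

type state string

const (
	running state = "running"
	exited  state = "exited"
)

// diff compares two relist snapshots (container ID -> state) and emits
// lifecycle events for containers that appeared or stopped.
func diff(prev, curr map[string]state) []string {
	var events []string
	for id, s := range curr {
		old, seen := prev[id]
		switch {
		case !seen && s == running:
			events = append(events, "ContainerStarted "+id)
		case seen && old == running && s == exited:
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	prev := map[string]state{"ba9cd11e": running}
	curr := map[string]state{"ba9cd11e": exited, "82033028": running}
	// Prints one ContainerDied and one ContainerStarted event (map order varies).
	for _, e := range diff(prev, curr) {
		fmt.Println(e)
	}
}
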
crc kubenswrapper[4909]: I1128 18:49:43.309154 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m54tx" Nov 28 18:49:43 crc kubenswrapper[4909]: I1128 18:49:43.386365 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m54tx" Nov 28 18:49:44 crc kubenswrapper[4909]: I1128 18:49:44.351563 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m54tx" Nov 28 18:49:45 crc kubenswrapper[4909]: I1128 18:49:45.587181 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m54tx"] Nov 28 18:49:46 crc kubenswrapper[4909]: I1128 18:49:46.302204 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m54tx" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="registry-server" containerID="cri-o://f076208443f7a4d22eb586f3b6350016123bb75e3683a02849185a94f28ea5f4" gracePeriod=2 Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.321376 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerDied","Data":"f076208443f7a4d22eb586f3b6350016123bb75e3683a02849185a94f28ea5f4"} Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.321365 4909 generic.go:334] "Generic (PLEG): container finished" podID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerID="f076208443f7a4d22eb586f3b6350016123bb75e3683a02849185a94f28ea5f4" exitCode=0 Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.499343 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m54tx" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.661562 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content\") pod \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.661978 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities\") pod \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.662146 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xn57\" (UniqueName: \"kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57\") pod \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\" (UID: \"27d688d5-8752-40b5-9aa9-6c1d255dfa24\") " Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.664135 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities" (OuterVolumeSpecName: "utilities") pod "27d688d5-8752-40b5-9aa9-6c1d255dfa24" (UID: "27d688d5-8752-40b5-9aa9-6c1d255dfa24"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.670062 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57" (OuterVolumeSpecName: "kube-api-access-7xn57") pod "27d688d5-8752-40b5-9aa9-6c1d255dfa24" (UID: "27d688d5-8752-40b5-9aa9-6c1d255dfa24"). InnerVolumeSpecName "kube-api-access-7xn57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.713939 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27d688d5-8752-40b5-9aa9-6c1d255dfa24" (UID: "27d688d5-8752-40b5-9aa9-6c1d255dfa24"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.764525 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.764560 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xn57\" (UniqueName: \"kubernetes.io/projected/27d688d5-8752-40b5-9aa9-6c1d255dfa24-kube-api-access-7xn57\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:47 crc kubenswrapper[4909]: I1128 18:49:47.764572 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d688d5-8752-40b5-9aa9-6c1d255dfa24-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.332844 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m54tx" event={"ID":"27d688d5-8752-40b5-9aa9-6c1d255dfa24","Type":"ContainerDied","Data":"efd2bfa29df4d4a5f00bad4a3340943a50b5b9223f082ca14db90e2b7d6bed45"} Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.333137 4909 scope.go:117] "RemoveContainer" containerID="f076208443f7a4d22eb586f3b6350016123bb75e3683a02849185a94f28ea5f4" Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.333270 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m54tx" Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.364230 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m54tx"] Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.366283 4909 scope.go:117] "RemoveContainer" containerID="119417e62b442ee93ab989a8f379e9599bffdf778da0e8b5aab0a2cb67744e4f" Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.374901 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m54tx"] Nov 28 18:49:48 crc kubenswrapper[4909]: I1128 18:49:48.393193 4909 scope.go:117] "RemoveContainer" containerID="bd7bda477d74753df702dc8297ecff0b30618c0c736c1f1dd0ccaa50e9cb9fe4" Nov 28 18:49:49 crc kubenswrapper[4909]: I1128 18:49:49.911264 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:49:49 crc kubenswrapper[4909]: I1128 18:49:49.911322 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:49:49 crc kubenswrapper[4909]: I1128 18:49:49.922025 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" path="/var/lib/kubelet/pods/27d688d5-8752-40b5-9aa9-6c1d255dfa24/volumes" Nov 28 18:49:50 crc kubenswrapper[4909]: E1128 18:49:50.188972 4909 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.53:57730->38.102.83.53:44787: write tcp 38.102.83.53:57730->38.102.83.53:44787: write: broken pipe Nov 28 18:49:53 crc kubenswrapper[4909]: I1128 18:49:53.192324 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rc2tj" Nov 28 18:49:53 crc kubenswrapper[4909]: I1128 18:49:53.262528 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"] Nov 28 18:49:53 crc kubenswrapper[4909]: I1128 18:49:53.399008 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rc2tj" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="registry-server" containerID="cri-o://820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d" gracePeriod=2 Nov 28 18:49:53 crc kubenswrapper[4909]: I1128 18:49:53.952368 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rc2tj" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.100440 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content\") pod \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.100538 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities\") pod \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.100734 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnfjq\" (UniqueName: \"kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq\") pod \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\" (UID: \"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5\") " Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.102083 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities" (OuterVolumeSpecName: "utilities") pod "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" (UID: "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.103030 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.107755 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq" (OuterVolumeSpecName: "kube-api-access-qnfjq") pod "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" (UID: "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5"). InnerVolumeSpecName "kube-api-access-qnfjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.169607 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" (UID: "bd3ec630-0e05-436e-a6f6-fb069bb5a3d5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.205589 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.205637 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnfjq\" (UniqueName: \"kubernetes.io/projected/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5-kube-api-access-qnfjq\") on node \"crc\" DevicePath \"\"" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.419029 4909 generic.go:334] "Generic (PLEG): container finished" podID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerID="820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d" exitCode=0 Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.419099 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rc2tj" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.419095 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerDied","Data":"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d"} Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.419445 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc2tj" event={"ID":"bd3ec630-0e05-436e-a6f6-fb069bb5a3d5","Type":"ContainerDied","Data":"1e0f277851b7ca2d23c9fb4ebbb06a704b7bdfa89d9eaf6dee2f354e09e6b546"} Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.419465 4909 scope.go:117] "RemoveContainer" containerID="820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.446623 4909 scope.go:117] "RemoveContainer" containerID="ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.452996 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"] Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.463777 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rc2tj"] Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.487524 4909 scope.go:117] "RemoveContainer" containerID="ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.521192 4909 scope.go:117] "RemoveContainer" containerID="820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d" Nov 28 18:49:54 crc kubenswrapper[4909]: E1128 18:49:54.521606 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d\": container with ID starting with 820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d not found: ID does not exist" containerID="820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.521641 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d"} err="failed to get container status 
\"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d\": rpc error: code = NotFound desc = could not find container \"820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d\": container with ID starting with 820330287dceb7268e8442ab4a37529b8cb583afa3f5be1ce10b28287f0fbd6d not found: ID does not exist" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.521681 4909 scope.go:117] "RemoveContainer" containerID="ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38" Nov 28 18:49:54 crc kubenswrapper[4909]: E1128 18:49:54.522261 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38\": container with ID starting with ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38 not found: ID does not exist" containerID="ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.522288 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38"} err="failed to get container status \"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38\": rpc error: code = NotFound desc = could not find container \"ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38\": container with ID starting with ba9cd11ebb35512d593867640fa1eefe58b26ec6f95cf0eea849ca8cd6af1e38 not found: ID does not exist" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.522302 4909 scope.go:117] "RemoveContainer" containerID="ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66" Nov 28 18:49:54 crc kubenswrapper[4909]: E1128 18:49:54.522574 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66\": container with ID starting with ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66 not found: ID does not exist" containerID="ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66" Nov 28 18:49:54 crc kubenswrapper[4909]: I1128 18:49:54.522680 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66"} err="failed to get container status \"ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66\": rpc error: code = NotFound desc = could not find container \"ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66\": container with ID starting with ac52983b89b93aea921aef96631081e2fdc38f6e0f7c5172ce96e022e6b52d66 not found: ID does not exist" Nov 28 18:49:55 crc kubenswrapper[4909]: I1128 18:49:55.923788 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" path="/var/lib/kubelet/pods/bd3ec630-0e05-436e-a6f6-fb069bb5a3d5/volumes" Nov 28 18:50:19 crc kubenswrapper[4909]: I1128 18:50:19.910641 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:50:19 crc kubenswrapper[4909]: I1128 18:50:19.911609 4909 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:50:19 crc kubenswrapper[4909]: I1128 18:50:19.919638 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:50:19 crc kubenswrapper[4909]: I1128 18:50:19.921347 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:50:19 crc kubenswrapper[4909]: I1128 18:50:19.921456 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" gracePeriod=600 Nov 28 18:50:20 crc kubenswrapper[4909]: E1128 18:50:20.049868 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:50:20 crc kubenswrapper[4909]: I1128 18:50:20.762590 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" exitCode=0 Nov 28 18:50:20 crc kubenswrapper[4909]: I1128 18:50:20.762702 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495"} Nov 28 18:50:20 crc kubenswrapper[4909]: I1128 18:50:20.762762 4909 scope.go:117] "RemoveContainer" containerID="1b79d6263347baf50ca11beac0b5161bf74ecc26eb50576ac172a4314dc37da9" Nov 28 18:50:20 crc kubenswrapper[4909]: I1128 18:50:20.763382 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:50:20 crc kubenswrapper[4909]: E1128 18:50:20.764004 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:50:32 crc kubenswrapper[4909]: I1128 18:50:32.904312 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:50:32 crc kubenswrapper[4909]: E1128 18:50:32.905228 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:50:46 crc kubenswrapper[4909]: I1128 18:50:46.901436 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:50:46 crc kubenswrapper[4909]: E1128 18:50:46.902433 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:50:50 crc kubenswrapper[4909]: I1128 18:50:50.707563 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:50:50 crc kubenswrapper[4909]: I1128 18:50:50.708299 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="75370690-54d5-4283-8b1e-00deda652092" containerName="adoption" containerID="cri-o://241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a" gracePeriod=30 Nov 28 18:50:59 crc kubenswrapper[4909]: I1128 18:50:59.903197 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:50:59 crc kubenswrapper[4909]: E1128 18:50:59.906564 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:51:10 crc kubenswrapper[4909]: I1128 18:51:10.902456 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:51:10 crc kubenswrapper[4909]: E1128 18:51:10.903384 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:51:15 crc kubenswrapper[4909]: I1128 18:51:15.578620 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="e040cc04-ec87-429f-86b4-37fc9aa86fb1" containerName="galera" probeResult="failure" output="command timed out" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.239867 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.367556 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") pod \"75370690-54d5-4283-8b1e-00deda652092\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.367886 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsm99\" (UniqueName: \"kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99\") pod \"75370690-54d5-4283-8b1e-00deda652092\" (UID: \"75370690-54d5-4283-8b1e-00deda652092\") " Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.386622 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99" (OuterVolumeSpecName: "kube-api-access-xsm99") pod "75370690-54d5-4283-8b1e-00deda652092" (UID: "75370690-54d5-4283-8b1e-00deda652092"). InnerVolumeSpecName "kube-api-access-xsm99". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.400624 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324" (OuterVolumeSpecName: "mariadb-data") pod "75370690-54d5-4283-8b1e-00deda652092" (UID: "75370690-54d5-4283-8b1e-00deda652092"). InnerVolumeSpecName "pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.464732 4909 generic.go:334] "Generic (PLEG): container finished" podID="75370690-54d5-4283-8b1e-00deda652092" containerID="241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a" exitCode=137 Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.464788 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"75370690-54d5-4283-8b1e-00deda652092","Type":"ContainerDied","Data":"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a"} Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.464821 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"75370690-54d5-4283-8b1e-00deda652092","Type":"ContainerDied","Data":"b45ae6fd09d28c090cc1f4512ceb47fe164a8c2e0142f9c3b57e4f61fedb930a"} Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.464846 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.464851 4909 scope.go:117] "RemoveContainer" containerID="241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.470520 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") on node \"crc\" " Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.470549 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsm99\" (UniqueName: \"kubernetes.io/projected/75370690-54d5-4283-8b1e-00deda652092-kube-api-access-xsm99\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.504588 4909 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.505694 4909 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324") on node "crc" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.516590 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.523129 4909 scope.go:117] "RemoveContainer" containerID="241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a" Nov 28 18:51:21 crc kubenswrapper[4909]: E1128 18:51:21.523605 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a\": container with ID starting with 241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a not found: ID does not exist" containerID="241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.523632 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a"} err="failed to get container status \"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a\": rpc error: code = NotFound desc = could not find container \"241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a\": container with ID starting with 241ede6f5b70c6d762520c107d41898b9054421b0f9d81a5e7ad0b7351c7cf8a not found: ID does not exist" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.525349 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.572452 4909 reconciler_common.go:293] "Volume detached for volume \"pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b05bf8b3-1bd7-4304-855e-6d182fcf1324\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:21 crc kubenswrapper[4909]: I1128 18:51:21.920477 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75370690-54d5-4283-8b1e-00deda652092" path="/var/lib/kubelet/pods/75370690-54d5-4283-8b1e-00deda652092/volumes" Nov 28 18:51:22 crc kubenswrapper[4909]: I1128 18:51:22.254843 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ovn-copy-data"] Nov 28 18:51:22 crc kubenswrapper[4909]: I1128 18:51:22.255319 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="bb430976-530e-466d-be9a-7cb07fb560e7" containerName="adoption" containerID="cri-o://5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f" gracePeriod=30 Nov 28 18:51:25 crc kubenswrapper[4909]: I1128 18:51:25.914517 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:51:25 crc kubenswrapper[4909]: E1128 18:51:25.915613 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:51:36 crc kubenswrapper[4909]: I1128 18:51:36.903191 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:51:36 crc kubenswrapper[4909]: E1128 18:51:36.905988 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.504360 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505457 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505472 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505496 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="extract-utilities" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505505 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="extract-utilities" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505527 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="extract-content" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505536 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="extract-content" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505557 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505565 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505584 4909 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75370690-54d5-4283-8b1e-00deda652092" containerName="adoption" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505592 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="75370690-54d5-4283-8b1e-00deda652092" containerName="adoption" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505606 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="extract-content" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505614 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="extract-content" Nov 28 18:51:40 crc kubenswrapper[4909]: E1128 18:51:40.505627 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="extract-utilities" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505636 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="extract-utilities" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505958 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="27d688d5-8752-40b5-9aa9-6c1d255dfa24" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505976 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd3ec630-0e05-436e-a6f6-fb069bb5a3d5" containerName="registry-server" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.505992 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="75370690-54d5-4283-8b1e-00deda652092" containerName="adoption" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.509122 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.530448 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.611187 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7xcq\" (UniqueName: \"kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.611268 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.611419 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.713349 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7xcq\" (UniqueName: \"kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.713450 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.713522 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.714005 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.714335 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:40 crc kubenswrapper[4909]: I1128 18:51:40.878127 4909 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-h7xcq\" (UniqueName: \"kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq\") pod \"redhat-marketplace-8lld4\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:41 crc kubenswrapper[4909]: I1128 18:51:41.131880 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:41 crc kubenswrapper[4909]: I1128 18:51:41.625873 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:41 crc kubenswrapper[4909]: I1128 18:51:41.701160 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerStarted","Data":"5b3a816c827ede2af24ca5d1a6c7d3d9a03d1d26e007159767496bf92608d664"} Nov 28 18:51:42 crc kubenswrapper[4909]: I1128 18:51:42.714626 4909 generic.go:334] "Generic (PLEG): container finished" podID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerID="a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1" exitCode=0 Nov 28 18:51:42 crc kubenswrapper[4909]: I1128 18:51:42.714772 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerDied","Data":"a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1"} Nov 28 18:51:45 crc kubenswrapper[4909]: I1128 18:51:45.753182 4909 generic.go:334] "Generic (PLEG): container finished" podID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerID="ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5" exitCode=0 Nov 28 18:51:45 crc kubenswrapper[4909]: I1128 18:51:45.753322 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerDied","Data":"ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5"} Nov 28 18:51:46 crc kubenswrapper[4909]: I1128 18:51:46.768406 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerStarted","Data":"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370"} Nov 28 18:51:46 crc kubenswrapper[4909]: I1128 18:51:46.797830 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8lld4" podStartSLOduration=3.084485558 podStartE2EDuration="6.79781156s" podCreationTimestamp="2025-11-28 18:51:40 +0000 UTC" firstStartedPulling="2025-11-28 18:51:42.717838657 +0000 UTC m=+9685.114523181" lastFinishedPulling="2025-11-28 18:51:46.431164639 +0000 UTC m=+9688.827849183" observedRunningTime="2025-11-28 18:51:46.793508367 +0000 UTC m=+9689.190192901" watchObservedRunningTime="2025-11-28 18:51:46.79781156 +0000 UTC m=+9689.194496084" Nov 28 18:51:50 crc kubenswrapper[4909]: I1128 18:51:50.903178 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:51:50 crc kubenswrapper[4909]: E1128 18:51:50.904044 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:51:51 crc kubenswrapper[4909]: I1128 18:51:51.132469 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:51 crc kubenswrapper[4909]: I1128 18:51:51.132525 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:51 crc kubenswrapper[4909]: I1128 18:51:51.189248 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:51 crc kubenswrapper[4909]: I1128 18:51:51.894626 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:51 crc kubenswrapper[4909]: I1128 18:51:51.972553 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.836608 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.846756 4909 generic.go:334] "Generic (PLEG): container finished" podID="bb430976-530e-466d-be9a-7cb07fb560e7" containerID="5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f" exitCode=137 Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.846808 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.846829 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"bb430976-530e-466d-be9a-7cb07fb560e7","Type":"ContainerDied","Data":"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f"} Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.847107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"bb430976-530e-466d-be9a-7cb07fb560e7","Type":"ContainerDied","Data":"b927fc2b6ae1035c902f45ef1b75f6fe0d565a884178ff30cf20c057d6c41803"} Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.847136 4909 scope.go:117] "RemoveContainer" containerID="5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.896972 4909 scope.go:117] "RemoveContainer" containerID="5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f" Nov 28 18:51:52 crc kubenswrapper[4909]: E1128 18:51:52.898582 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f\": container with ID starting with 5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f not found: ID does not exist" containerID="5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.898677 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f"} err="failed to get container status \"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f\": rpc error: code = NotFound desc = could 
not find container \"5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f\": container with ID starting with 5827cf351581a93448f8f19fda6b6d62cc0392d8eb43bfce460600ff9d139b8f not found: ID does not exist" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.921004 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxtsg\" (UniqueName: \"kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg\") pod \"bb430976-530e-466d-be9a-7cb07fb560e7\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.922223 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") pod \"bb430976-530e-466d-be9a-7cb07fb560e7\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.922388 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert\") pod \"bb430976-530e-466d-be9a-7cb07fb560e7\" (UID: \"bb430976-530e-466d-be9a-7cb07fb560e7\") " Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.927963 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "bb430976-530e-466d-be9a-7cb07fb560e7" (UID: "bb430976-530e-466d-be9a-7cb07fb560e7"). InnerVolumeSpecName "ovn-data-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.928997 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg" (OuterVolumeSpecName: "kube-api-access-xxtsg") pod "bb430976-530e-466d-be9a-7cb07fb560e7" (UID: "bb430976-530e-466d-be9a-7cb07fb560e7"). InnerVolumeSpecName "kube-api-access-xxtsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:51:52 crc kubenswrapper[4909]: I1128 18:51:52.949091 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357" (OuterVolumeSpecName: "ovn-data") pod "bb430976-530e-466d-be9a-7cb07fb560e7" (UID: "bb430976-530e-466d-be9a-7cb07fb560e7"). InnerVolumeSpecName "pvc-77b06fa3-847d-47d3-aab5-1872a4f18357". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.026715 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxtsg\" (UniqueName: \"kubernetes.io/projected/bb430976-530e-466d-be9a-7cb07fb560e7-kube-api-access-xxtsg\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.026813 4909 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") on node \"crc\" " Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.026830 4909 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/bb430976-530e-466d-be9a-7cb07fb560e7-ovn-data-cert\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.062442 4909 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.062829 4909 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-77b06fa3-847d-47d3-aab5-1872a4f18357" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357") on node "crc" Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.130022 4909 reconciler_common.go:293] "Volume detached for volume \"pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77b06fa3-847d-47d3-aab5-1872a4f18357\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.185990 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.194217 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.861897 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8lld4" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="registry-server" containerID="cri-o://8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370" gracePeriod=2 Nov 28 18:51:53 crc kubenswrapper[4909]: I1128 18:51:53.921208 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb430976-530e-466d-be9a-7cb07fb560e7" path="/var/lib/kubelet/pods/bb430976-530e-466d-be9a-7cb07fb560e7/volumes" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.447240 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.570060 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7xcq\" (UniqueName: \"kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq\") pod \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.570391 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities\") pod \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.570461 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content\") pod \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\" (UID: \"80589d0b-cfbd-4552-ad71-dbad6ecd32c5\") " Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.572079 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities" (OuterVolumeSpecName: "utilities") pod "80589d0b-cfbd-4552-ad71-dbad6ecd32c5" (UID: "80589d0b-cfbd-4552-ad71-dbad6ecd32c5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.579294 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq" (OuterVolumeSpecName: "kube-api-access-h7xcq") pod "80589d0b-cfbd-4552-ad71-dbad6ecd32c5" (UID: "80589d0b-cfbd-4552-ad71-dbad6ecd32c5"). InnerVolumeSpecName "kube-api-access-h7xcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.590625 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80589d0b-cfbd-4552-ad71-dbad6ecd32c5" (UID: "80589d0b-cfbd-4552-ad71-dbad6ecd32c5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.672760 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7xcq\" (UniqueName: \"kubernetes.io/projected/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-kube-api-access-h7xcq\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.672791 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.672802 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80589d0b-cfbd-4552-ad71-dbad6ecd32c5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.877255 4909 generic.go:334] "Generic (PLEG): container finished" podID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerID="8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370" exitCode=0 Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.877301 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerDied","Data":"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370"} Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.877327 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lld4" event={"ID":"80589d0b-cfbd-4552-ad71-dbad6ecd32c5","Type":"ContainerDied","Data":"5b3a816c827ede2af24ca5d1a6c7d3d9a03d1d26e007159767496bf92608d664"} Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.877343 4909 scope.go:117] "RemoveContainer" containerID="8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.877463 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lld4" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.914391 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.923132 4909 scope.go:117] "RemoveContainer" containerID="ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5" Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.929923 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lld4"] Nov 28 18:51:54 crc kubenswrapper[4909]: I1128 18:51:54.957787 4909 scope.go:117] "RemoveContainer" containerID="a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.027183 4909 scope.go:117] "RemoveContainer" containerID="8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370" Nov 28 18:51:55 crc kubenswrapper[4909]: E1128 18:51:55.034017 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370\": container with ID starting with 8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370 not found: ID does not exist" containerID="8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.034103 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370"} err="failed to get container status \"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370\": rpc error: code = NotFound desc = could not find container \"8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370\": container with ID starting with 8abcbc2970a5b0c4365a80687a7c755743d769b3507d04f3e4eddccf65a59370 not found: ID does not exist" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.034145 4909 scope.go:117] "RemoveContainer" containerID="ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5" Nov 28 18:51:55 crc kubenswrapper[4909]: E1128 18:51:55.035218 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5\": container with ID starting with ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5 not found: ID does not exist" containerID="ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.035281 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5"} err="failed to get container status \"ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5\": rpc error: code = NotFound desc = could not find container \"ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5\": container with ID starting with ec14f8c69104e764b33196869321a955bd9d3e92da192909df4a61a4445554b5 not found: ID does not exist" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.035321 4909 scope.go:117] "RemoveContainer" containerID="a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1" Nov 28 18:51:55 crc kubenswrapper[4909]: E1128 18:51:55.036086 4909 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1\": container with ID starting with a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1 not found: ID does not exist" containerID="a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.036146 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1"} err="failed to get container status \"a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1\": rpc error: code = NotFound desc = could not find container \"a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1\": container with ID starting with a7384e5e23fc66aed68b706288d394f3e1faf0ff46c80d0325e20f789e26d0a1 not found: ID does not exist" Nov 28 18:51:55 crc kubenswrapper[4909]: I1128 18:51:55.916199 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" path="/var/lib/kubelet/pods/80589d0b-cfbd-4552-ad71-dbad6ecd32c5/volumes" Nov 28 18:52:04 crc kubenswrapper[4909]: I1128 18:52:04.902525 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:52:04 crc kubenswrapper[4909]: E1128 18:52:04.903556 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:52:16 crc kubenswrapper[4909]: I1128 18:52:16.902335 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:52:16 crc kubenswrapper[4909]: E1128 18:52:16.903068 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:52:27 crc kubenswrapper[4909]: I1128 18:52:27.909422 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:52:27 crc kubenswrapper[4909]: E1128 18:52:27.910499 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:52:39 crc kubenswrapper[4909]: I1128 18:52:39.903023 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:52:39 crc kubenswrapper[4909]: E1128 18:52:39.904114 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:52:52 crc kubenswrapper[4909]: I1128 18:52:52.901893 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:52:52 crc kubenswrapper[4909]: E1128 18:52:52.902963 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.792474 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xw4tq/must-gather-9vc6g"] Nov 28 18:52:59 crc kubenswrapper[4909]: E1128 18:52:59.793686 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="extract-content" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.793703 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="extract-content" Nov 28 18:52:59 crc kubenswrapper[4909]: E1128 18:52:59.793723 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb430976-530e-466d-be9a-7cb07fb560e7" containerName="adoption" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.793731 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb430976-530e-466d-be9a-7cb07fb560e7" containerName="adoption" Nov 28 18:52:59 crc kubenswrapper[4909]: E1128 18:52:59.793741 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="extract-utilities" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.793749 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="extract-utilities" Nov 28 18:52:59 crc kubenswrapper[4909]: E1128 18:52:59.793762 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="registry-server" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.793769 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="registry-server" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.794003 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb430976-530e-466d-be9a-7cb07fb560e7" containerName="adoption" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.794030 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="80589d0b-cfbd-4552-ad71-dbad6ecd32c5" containerName="registry-server" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.795234 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.798256 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xw4tq"/"openshift-service-ca.crt" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.798536 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xw4tq"/"kube-root-ca.crt" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.798738 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-xw4tq"/"default-dockercfg-d77qh" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.811407 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xw4tq/must-gather-9vc6g"] Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.965652 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klfpt\" (UniqueName: \"kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:52:59 crc kubenswrapper[4909]: I1128 18:52:59.965742 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.068046 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klfpt\" (UniqueName: \"kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.068134 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.068554 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.087235 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klfpt\" (UniqueName: \"kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt\") pod \"must-gather-9vc6g\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") " pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.120782 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.630085 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xw4tq/must-gather-9vc6g"] Nov 28 18:53:00 crc kubenswrapper[4909]: I1128 18:53:00.703869 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" event={"ID":"2c3a1077-9e2d-4c6a-8963-98e8a0662189","Type":"ContainerStarted","Data":"af30596363144dd391e8d0e880c6f380f0f586606c4ae8cd1763caafed01295e"} Nov 28 18:53:06 crc kubenswrapper[4909]: I1128 18:53:06.770610 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" event={"ID":"2c3a1077-9e2d-4c6a-8963-98e8a0662189","Type":"ContainerStarted","Data":"8343aacd3bcdc478c4b067ec67fbc7a57668b99929aec5071a8facf091585ec5"} Nov 28 18:53:06 crc kubenswrapper[4909]: I1128 18:53:06.771328 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" event={"ID":"2c3a1077-9e2d-4c6a-8963-98e8a0662189","Type":"ContainerStarted","Data":"ffe1df1a67259d0edaba28ddf00bd1460a871ae4abb2601a22cb13893092fef3"} Nov 28 18:53:06 crc kubenswrapper[4909]: I1128 18:53:06.799985 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" podStartSLOduration=2.470288459 podStartE2EDuration="7.799939278s" podCreationTimestamp="2025-11-28 18:52:59 +0000 UTC" firstStartedPulling="2025-11-28 18:53:00.622244583 +0000 UTC m=+9763.018929107" lastFinishedPulling="2025-11-28 18:53:05.951895402 +0000 UTC m=+9768.348579926" observedRunningTime="2025-11-28 18:53:06.788311503 +0000 UTC m=+9769.184996037" watchObservedRunningTime="2025-11-28 18:53:06.799939278 +0000 UTC m=+9769.196623832" Nov 28 18:53:06 crc kubenswrapper[4909]: I1128 18:53:06.902401 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:53:06 crc kubenswrapper[4909]: E1128 18:53:06.902637 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.352784 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-dt7xk"] Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.355283 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.521170 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq2jt\" (UniqueName: \"kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.521301 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.623474 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.623622 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.623644 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq2jt\" (UniqueName: \"kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.641977 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq2jt\" (UniqueName: \"kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt\") pod \"crc-debug-dt7xk\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.676403 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:10 crc kubenswrapper[4909]: W1128 18:53:10.738307 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2e717db_8407_4363_a2be_b789ea225705.slice/crio-bd8ecd391a2c1affad6c2c3c27266bec7d02d6e249278471a8275a0ce74be344 WatchSource:0}: Error finding container bd8ecd391a2c1affad6c2c3c27266bec7d02d6e249278471a8275a0ce74be344: Status 404 returned error can't find the container with id bd8ecd391a2c1affad6c2c3c27266bec7d02d6e249278471a8275a0ce74be344 Nov 28 18:53:10 crc kubenswrapper[4909]: I1128 18:53:10.823439 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" event={"ID":"f2e717db-8407-4363-a2be-b789ea225705","Type":"ContainerStarted","Data":"bd8ecd391a2c1affad6c2c3c27266bec7d02d6e249278471a8275a0ce74be344"} Nov 28 18:53:20 crc kubenswrapper[4909]: I1128 18:53:20.902124 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:53:20 crc kubenswrapper[4909]: E1128 18:53:20.902951 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:53:22 crc kubenswrapper[4909]: I1128 18:53:22.950318 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" event={"ID":"f2e717db-8407-4363-a2be-b789ea225705","Type":"ContainerStarted","Data":"50e352405fde3cc0a236c78f4ea8121321cdd989fde854d261990b1c1c5bf338"} Nov 28 18:53:35 crc kubenswrapper[4909]: I1128 18:53:35.902576 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:53:35 crc kubenswrapper[4909]: E1128 18:53:35.903468 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:53:40 crc kubenswrapper[4909]: I1128 18:53:40.136175 4909 generic.go:334] "Generic (PLEG): container finished" podID="f2e717db-8407-4363-a2be-b789ea225705" containerID="50e352405fde3cc0a236c78f4ea8121321cdd989fde854d261990b1c1c5bf338" exitCode=0 Nov 28 18:53:40 crc kubenswrapper[4909]: I1128 18:53:40.136667 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" event={"ID":"f2e717db-8407-4363-a2be-b789ea225705","Type":"ContainerDied","Data":"50e352405fde3cc0a236c78f4ea8121321cdd989fde854d261990b1c1c5bf338"} Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.297369 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.339284 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-dt7xk"] Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.351237 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-dt7xk"] Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.404441 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host\") pod \"f2e717db-8407-4363-a2be-b789ea225705\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.404523 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq2jt\" (UniqueName: \"kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt\") pod \"f2e717db-8407-4363-a2be-b789ea225705\" (UID: \"f2e717db-8407-4363-a2be-b789ea225705\") " Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.404590 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host" (OuterVolumeSpecName: "host") pod "f2e717db-8407-4363-a2be-b789ea225705" (UID: "f2e717db-8407-4363-a2be-b789ea225705"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.405031 4909 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2e717db-8407-4363-a2be-b789ea225705-host\") on node \"crc\" DevicePath \"\"" Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.411459 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt" (OuterVolumeSpecName: "kube-api-access-vq2jt") pod "f2e717db-8407-4363-a2be-b789ea225705" (UID: "f2e717db-8407-4363-a2be-b789ea225705"). InnerVolumeSpecName "kube-api-access-vq2jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.507440 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq2jt\" (UniqueName: \"kubernetes.io/projected/f2e717db-8407-4363-a2be-b789ea225705-kube-api-access-vq2jt\") on node \"crc\" DevicePath \"\"" Nov 28 18:53:41 crc kubenswrapper[4909]: I1128 18:53:41.916506 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e717db-8407-4363-a2be-b789ea225705" path="/var/lib/kubelet/pods/f2e717db-8407-4363-a2be-b789ea225705/volumes" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.159833 4909 scope.go:117] "RemoveContainer" containerID="50e352405fde3cc0a236c78f4ea8121321cdd989fde854d261990b1c1c5bf338" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.159996 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-dt7xk" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.506508 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:53:42 crc kubenswrapper[4909]: E1128 18:53:42.507181 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e717db-8407-4363-a2be-b789ea225705" containerName="container-00" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.507193 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e717db-8407-4363-a2be-b789ea225705" containerName="container-00" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.507420 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e717db-8407-4363-a2be-b789ea225705" containerName="container-00" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.509096 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.526259 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.606849 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-w7f4d"] Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.612114 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.630330 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.630396 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.630463 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtnrm\" (UniqueName: \"kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733014 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733074 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55fsj\" (UniqueName: \"kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " 
pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733165 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733195 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733378 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtnrm\" (UniqueName: \"kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733721 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.733794 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.762719 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtnrm\" (UniqueName: \"kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm\") pod \"redhat-operators-8rw4n\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.835855 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55fsj\" (UniqueName: \"kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.835905 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.836024 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.839410 4909 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.852420 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55fsj\" (UniqueName: \"kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj\") pod \"crc-debug-w7f4d\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:42 crc kubenswrapper[4909]: I1128 18:53:42.935276 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:43 crc kubenswrapper[4909]: I1128 18:53:43.207931 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" event={"ID":"b22699ac-b671-4b93-a53f-4493984f5874","Type":"ContainerStarted","Data":"a1000985b214f6aea61ff77e348834630757fb5646edbc8b1ed551085b1e0c4f"} Nov 28 18:53:43 crc kubenswrapper[4909]: I1128 18:53:43.518154 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.222543 4909 generic.go:334] "Generic (PLEG): container finished" podID="b22699ac-b671-4b93-a53f-4493984f5874" containerID="cc218957a5148a0df690bd27d0c275a80070e436482d45a602f325764d289a34" exitCode=1 Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.222625 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" event={"ID":"b22699ac-b671-4b93-a53f-4493984f5874","Type":"ContainerDied","Data":"cc218957a5148a0df690bd27d0c275a80070e436482d45a602f325764d289a34"} Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.226271 4909 generic.go:334] "Generic (PLEG): container finished" podID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerID="8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb" exitCode=0 Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.226336 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerDied","Data":"8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb"} Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.226406 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerStarted","Data":"1893ca18ec3d7ec6f47cc7075d36d4073d1d8328dd3e50140d3867c2a7d5c6fa"} Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.297778 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-w7f4d"] Nov 28 18:53:44 crc kubenswrapper[4909]: I1128 18:53:44.319292 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xw4tq/crc-debug-w7f4d"] Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.377692 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.487374 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host\") pod \"b22699ac-b671-4b93-a53f-4493984f5874\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.487430 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55fsj\" (UniqueName: \"kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj\") pod \"b22699ac-b671-4b93-a53f-4493984f5874\" (UID: \"b22699ac-b671-4b93-a53f-4493984f5874\") " Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.487470 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host" (OuterVolumeSpecName: "host") pod "b22699ac-b671-4b93-a53f-4493984f5874" (UID: "b22699ac-b671-4b93-a53f-4493984f5874"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.488015 4909 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b22699ac-b671-4b93-a53f-4493984f5874-host\") on node \"crc\" DevicePath \"\"" Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.494979 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj" (OuterVolumeSpecName: "kube-api-access-55fsj") pod "b22699ac-b671-4b93-a53f-4493984f5874" (UID: "b22699ac-b671-4b93-a53f-4493984f5874"). InnerVolumeSpecName "kube-api-access-55fsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.590164 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55fsj\" (UniqueName: \"kubernetes.io/projected/b22699ac-b671-4b93-a53f-4493984f5874-kube-api-access-55fsj\") on node \"crc\" DevicePath \"\"" Nov 28 18:53:45 crc kubenswrapper[4909]: I1128 18:53:45.913242 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b22699ac-b671-4b93-a53f-4493984f5874" path="/var/lib/kubelet/pods/b22699ac-b671-4b93-a53f-4493984f5874/volumes" Nov 28 18:53:46 crc kubenswrapper[4909]: I1128 18:53:46.260011 4909 scope.go:117] "RemoveContainer" containerID="cc218957a5148a0df690bd27d0c275a80070e436482d45a602f325764d289a34" Nov 28 18:53:46 crc kubenswrapper[4909]: I1128 18:53:46.260144 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xw4tq/crc-debug-w7f4d" Nov 28 18:53:46 crc kubenswrapper[4909]: I1128 18:53:46.269414 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerStarted","Data":"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397"} Nov 28 18:53:48 crc kubenswrapper[4909]: I1128 18:53:48.902013 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:53:48 crc kubenswrapper[4909]: E1128 18:53:48.903254 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:53:49 crc kubenswrapper[4909]: I1128 18:53:49.305475 4909 generic.go:334] "Generic (PLEG): container finished" podID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerID="e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397" exitCode=0 Nov 28 18:53:49 crc kubenswrapper[4909]: I1128 18:53:49.305545 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerDied","Data":"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397"} Nov 28 18:53:50 crc kubenswrapper[4909]: I1128 18:53:50.318018 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerStarted","Data":"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb"} Nov 28 18:53:50 crc kubenswrapper[4909]: I1128 18:53:50.349780 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8rw4n" podStartSLOduration=2.716320747 podStartE2EDuration="8.349758358s" podCreationTimestamp="2025-11-28 18:53:42 +0000 UTC" firstStartedPulling="2025-11-28 18:53:44.229563512 +0000 UTC m=+9806.626248036" lastFinishedPulling="2025-11-28 18:53:49.863001123 +0000 UTC m=+9812.259685647" observedRunningTime="2025-11-28 18:53:50.337167357 +0000 UTC m=+9812.733851881" watchObservedRunningTime="2025-11-28 18:53:50.349758358 +0000 UTC m=+9812.746442902" Nov 28 18:53:52 crc kubenswrapper[4909]: I1128 18:53:52.840521 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:52 crc kubenswrapper[4909]: I1128 18:53:52.841183 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:53:53 crc kubenswrapper[4909]: I1128 18:53:53.890890 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8rw4n" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="registry-server" probeResult="failure" output=< Nov 28 18:53:53 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s Nov 28 18:53:53 crc kubenswrapper[4909]: > Nov 28 18:54:02 crc kubenswrapper[4909]: I1128 18:54:02.900968 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:54:02 crc kubenswrapper[4909]: I1128 18:54:02.904127 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:54:02 crc kubenswrapper[4909]: E1128 18:54:02.905370 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:54:02 crc kubenswrapper[4909]: I1128 18:54:02.977626 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:54:03 crc kubenswrapper[4909]: I1128 18:54:03.151487 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:54:04 crc kubenswrapper[4909]: I1128 18:54:04.462058 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8rw4n" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="registry-server" containerID="cri-o://a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb" gracePeriod=2 Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.044479 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.167791 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content\") pod \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.168638 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities\") pod \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.169243 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtnrm\" (UniqueName: \"kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm\") pod \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\" (UID: \"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb\") " Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.169998 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities" (OuterVolumeSpecName: "utilities") pod "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" (UID: "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.170361 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.178007 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm" (OuterVolumeSpecName: "kube-api-access-xtnrm") pod "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" (UID: "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb"). InnerVolumeSpecName "kube-api-access-xtnrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.272379 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtnrm\" (UniqueName: \"kubernetes.io/projected/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-kube-api-access-xtnrm\") on node \"crc\" DevicePath \"\"" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.307219 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" (UID: "20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.373946 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.477157 4909 generic.go:334] "Generic (PLEG): container finished" podID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerID="a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb" exitCode=0 Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.477241 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerDied","Data":"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb"} Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.477287 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rw4n" event={"ID":"20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb","Type":"ContainerDied","Data":"1893ca18ec3d7ec6f47cc7075d36d4073d1d8328dd3e50140d3867c2a7d5c6fa"} Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.477322 4909 scope.go:117] "RemoveContainer" containerID="a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.477624 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8rw4n" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.516032 4909 scope.go:117] "RemoveContainer" containerID="e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.526872 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.537390 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8rw4n"] Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.553386 4909 scope.go:117] "RemoveContainer" containerID="8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.618608 4909 scope.go:117] "RemoveContainer" containerID="a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb" Nov 28 18:54:05 crc kubenswrapper[4909]: E1128 18:54:05.619306 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb\": container with ID starting with a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb not found: ID does not exist" containerID="a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.619373 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb"} err="failed to get container status \"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb\": rpc error: code = NotFound desc = could not find container \"a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb\": container with ID starting with a076567dbf868b793cfe973c64d8c84a7ec42457b97c55c9631352cff4ba0edb not found: ID does not exist" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.619417 4909 scope.go:117] "RemoveContainer" containerID="e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397" Nov 28 18:54:05 crc kubenswrapper[4909]: E1128 18:54:05.619927 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397\": container with ID starting with e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397 not found: ID does not exist" containerID="e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.619974 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397"} err="failed to get container status \"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397\": rpc error: code = NotFound desc = could not find container \"e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397\": container with ID starting with e0786069d2eaed1a298dd5c85d7302c5c5a8ed6058ecfd7ebc4dc498c6384397 not found: ID does not exist" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.620010 4909 scope.go:117] "RemoveContainer" containerID="8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb" Nov 28 18:54:05 crc kubenswrapper[4909]: E1128 18:54:05.620330 4909 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb\": container with ID starting with 8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb not found: ID does not exist" containerID="8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.620364 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb"} err="failed to get container status \"8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb\": rpc error: code = NotFound desc = could not find container \"8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb\": container with ID starting with 8fb219141fb32b72e7d74ffea75432c4e76af2d9f2032214dc73b1d0b386ccdb not found: ID does not exist" Nov 28 18:54:05 crc kubenswrapper[4909]: I1128 18:54:05.922629 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" path="/var/lib/kubelet/pods/20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb/volumes" Nov 28 18:54:16 crc kubenswrapper[4909]: I1128 18:54:16.904302 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:54:16 crc kubenswrapper[4909]: E1128 18:54:16.905374 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:54:29 crc kubenswrapper[4909]: I1128 18:54:29.902081 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:54:29 crc kubenswrapper[4909]: E1128 18:54:29.903277 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:54:44 crc kubenswrapper[4909]: I1128 18:54:44.903613 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:54:44 crc kubenswrapper[4909]: E1128 18:54:44.904646 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:54:57 crc kubenswrapper[4909]: I1128 18:54:57.908914 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:54:57 crc kubenswrapper[4909]: E1128 18:54:57.909559 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:55:08 crc kubenswrapper[4909]: I1128 18:55:08.902563 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:55:08 crc kubenswrapper[4909]: E1128 18:55:08.903432 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 18:55:23 crc kubenswrapper[4909]: I1128 18:55:23.902845 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:55:24 crc kubenswrapper[4909]: I1128 18:55:24.570333 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e"} Nov 28 18:57:49 crc kubenswrapper[4909]: I1128 18:57:49.912221 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:57:49 crc kubenswrapper[4909]: I1128 18:57:49.912960 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:57:54 crc kubenswrapper[4909]: I1128 18:57:54.675869 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_abb9ef1a-19b4-4dca-a532-ec15cb3625b1/init-config-reloader/0.log" Nov 28 18:57:54 crc kubenswrapper[4909]: I1128 18:57:54.845598 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_abb9ef1a-19b4-4dca-a532-ec15cb3625b1/init-config-reloader/0.log" Nov 28 18:57:54 crc kubenswrapper[4909]: I1128 18:57:54.889080 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_abb9ef1a-19b4-4dca-a532-ec15cb3625b1/alertmanager/0.log" Nov 28 18:57:54 crc kubenswrapper[4909]: I1128 18:57:54.997306 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_abb9ef1a-19b4-4dca-a532-ec15cb3625b1/config-reloader/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.050517 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_c0f248c2-fae0-4996-b2d1-3d19d18f6cc5/aodh-api/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.122573 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_aodh-0_c0f248c2-fae0-4996-b2d1-3d19d18f6cc5/aodh-evaluator/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.250822 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_c0f248c2-fae0-4996-b2d1-3d19d18f6cc5/aodh-notifier/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.272717 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_c0f248c2-fae0-4996-b2d1-3d19d18f6cc5/aodh-listener/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.386277 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-76ddd994f8-kn6g9_799b6ce4-6703-4ef0-a2a9-dc181821c868/barbican-api/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.476952 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-76ddd994f8-kn6g9_799b6ce4-6703-4ef0-a2a9-dc181821c868/barbican-api-log/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.570993 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-588cbd7d6-6mxll_93545244-c0f9-4766-9239-567b5722f3b6/barbican-keystone-listener/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.619440 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-588cbd7d6-6mxll_93545244-c0f9-4766-9239-567b5722f3b6/barbican-keystone-listener-log/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.756601 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-74bff95d75-zkvnr_aa676b78-af57-406c-95fd-d7f329dd46d7/barbican-worker/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.804606 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-74bff95d75-zkvnr_aa676b78-af57-406c-95fd-d7f329dd46d7/barbican-worker-log/0.log" Nov 28 18:57:55 crc kubenswrapper[4909]: I1128 18:57:55.978592 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-xp7wk_67e07eb7-8bd1-448a-898a-7ce44180ceaf/bootstrap-openstack-openstack-cell1/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.045230 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bda35a95-ef50-478b-9dd3-7ce3743fda57/ceilometer-central-agent/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.167100 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bda35a95-ef50-478b-9dd3-7ce3743fda57/ceilometer-notification-agent/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.196689 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bda35a95-ef50-478b-9dd3-7ce3743fda57/proxy-httpd/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.205956 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bda35a95-ef50-478b-9dd3-7ce3743fda57/sg-core/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.381708 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-kj8w5_54f8470a-3a05-43ec-9e56-d2cf963b71bb/ceph-client-openstack-openstack-cell1/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.477771 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fc695e03-ba92-4ca7-a736-44869be5a553/cinder-api/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 
18:57:56.501721 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fc695e03-ba92-4ca7-a736-44869be5a553/cinder-api-log/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.724165 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d6109b09-2279-459c-a836-b02bb0156e0a/probe/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.819472 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_d6109b09-2279-459c-a836-b02bb0156e0a/cinder-backup/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.870621 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_02b08df8-7490-4759-8f0b-c7415210385b/cinder-scheduler/0.log" Nov 28 18:57:56 crc kubenswrapper[4909]: I1128 18:57:56.996730 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_02b08df8-7490-4759-8f0b-c7415210385b/probe/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.044271 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d3778829-e25a-4c1a-b6bd-25085ee5e380/cinder-volume/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.089825 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d3778829-e25a-4c1a-b6bd-25085ee5e380/probe/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.260902 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-pclm9_1f9fdddb-81e6-435f-9d8a-cc3e89bea15f/configure-network-openstack-openstack-cell1/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.295842 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-7cgmn_2268ed7c-96e8-4452-b1fb-babe8572783e/configure-os-openstack-openstack-cell1/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.480717 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-56b94bdf77-gbr2l_49263899-997f-4338-8a6f-0492e45aad4f/init/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.668187 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-56b94bdf77-gbr2l_49263899-997f-4338-8a6f-0492e45aad4f/init/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.715751 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-l84qt_b75c9750-f182-4b75-9a62-57f3939379c4/download-cache-openstack-openstack-cell1/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.731213 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-56b94bdf77-gbr2l_49263899-997f-4338-8a6f-0492e45aad4f/dnsmasq-dns/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.903967 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_10420792-94b6-41b0-9e92-391c998c8b26/glance-log/0.log" Nov 28 18:57:57 crc kubenswrapper[4909]: I1128 18:57:57.990823 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_10420792-94b6-41b0-9e92-391c998c8b26/glance-httpd/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.086869 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_da12d29e-36b6-4d5c-90aa-9311ea8dd628/glance-httpd/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.115852 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_da12d29e-36b6-4d5c-90aa-9311ea8dd628/glance-log/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.304691 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-7fb999b7d7-5c5hr_dbf8fec5-911d-4e34-8cd6-dd4d7e279741/heat-api/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.400557 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-86554d647c-254tn_55707b9e-aa43-4152-999d-6ac4a4ac49d2/heat-cfnapi/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.511259 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-745d4ddf6b-r7v9k_e547b3a5-efff-473b-842f-5c4a1334bc46/heat-engine/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.628455 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-65fb6b7dff-zjwdh_cf2fee0c-2628-4b4c-bd64-fe39147a51d1/horizon/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.712485 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-65fb6b7dff-zjwdh_cf2fee0c-2628-4b4c-bd64-fe39147a51d1/horizon-log/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.739991 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-rg4b9_0c3aabae-25df-4037-9383-d8affbcd3674/install-certs-openstack-openstack-cell1/0.log" Nov 28 18:57:58 crc kubenswrapper[4909]: I1128 18:57:58.832470 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-2x6vh_00ea43ac-10c8-4210-a08b-268e72f43f4f/install-os-openstack-openstack-cell1/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.049141 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-755754cf9d-jvrhc_206f6377-a9e3-41f9-899c-fea7f35ecd51/keystone-api/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.642545 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-srn2t_de3e2413-2749-4df8-b633-30842a045c5c/libvirt-openstack-openstack-cell1/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.655068 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_3c15f4bf-b1ef-4b7b-b7b8-f3e9e26eccd0/kube-state-metrics/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.675574 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405881-hg7qc_187d1121-6730-42b2-8233-4f050d18f92b/keystone-cron/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.831896 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_21a44c88-dfc5-4832-8db4-b16f31dc9625/manila-api-log/0.log" Nov 28 18:57:59 crc kubenswrapper[4909]: I1128 18:57:59.965863 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_21a44c88-dfc5-4832-8db4-b16f31dc9625/manila-api/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.053637 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9d7fb806-75ea-4671-a658-8871d26a148c/manila-scheduler/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: 
I1128 18:58:00.074844 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9d7fb806-75ea-4671-a658-8871d26a148c/probe/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.161207 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_0a875053-cc34-499d-8aef-c515a3e3b399/manila-share/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.202285 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_0a875053-cc34-499d-8aef-c515a3e3b399/probe/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.450699 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7b4d47ccc7-5qprs_2c9e3241-c8bc-44a3-88cd-4e251259dba0/neutron-httpd/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.521919 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7b4d47ccc7-5qprs_2c9e3241-c8bc-44a3-88cd-4e251259dba0/neutron-api/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.758969 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-r2h42_fa77c1e1-fac0-4183-824f-fbd83a237232/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 28 18:58:00 crc kubenswrapper[4909]: I1128 18:58:00.817397 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-5rpbd_38f1fa07-931f-4994-a9da-219f6464f5ca/neutron-metadata-openstack-openstack-cell1/0.log" Nov 28 18:58:01 crc kubenswrapper[4909]: I1128 18:58:01.547102 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e96a498d-55ea-45ba-8bf3-2ab66ba99d0a/nova-api-api/0.log" Nov 28 18:58:01 crc kubenswrapper[4909]: I1128 18:58:01.623441 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-l48sb_5b0feed7-a809-4479-a204-1d7b86a9b953/neutron-sriov-openstack-openstack-cell1/0.log" Nov 28 18:58:01 crc kubenswrapper[4909]: I1128 18:58:01.838203 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e96a498d-55ea-45ba-8bf3-2ab66ba99d0a/nova-api-log/0.log" Nov 28 18:58:01 crc kubenswrapper[4909]: I1128 18:58:01.866165 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_1b4b7190-2021-4a98-b7a6-f9b051e04f36/nova-cell0-conductor-conductor/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.073288 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6ce5e59d-c3dd-439d-86dc-9abdeb9cc320/nova-cell1-conductor-conductor/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.230877 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d8441066-6062-409c-a31e-9009886f1104/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.406607 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell4hv6d_410d136c-96de-485b-a570-3b74d2d66941/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.518749 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-wztp8_8561d38e-6957-4c25-9849-1e73103a9efd/nova-cell1-openstack-openstack-cell1/0.log" Nov 28 18:58:02 crc 
kubenswrapper[4909]: I1128 18:58:02.696984 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1184d3c2-eb34-4da7-9f6b-39789cd507e4/nova-metadata-log/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.700117 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1184d3c2-eb34-4da7-9f6b-39789cd507e4/nova-metadata-metadata/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.912544 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-6fbcff57c4-cnfb7_7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33/init/0.log" Nov 28 18:58:02 crc kubenswrapper[4909]: I1128 18:58:02.923796 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_732cb966-3efa-4ba9-8bc1-ce4427b9e92b/nova-scheduler-scheduler/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.110111 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-6fbcff57c4-cnfb7_7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33/init/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.202295 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-6fbcff57c4-cnfb7_7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33/octavia-api-provider-agent/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.338744 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-srrcl_7da68717-3e7e-4652-be23-4662a4fcf3e8/init/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.403104 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-6fbcff57c4-cnfb7_7d5c1cf2-bc57-40ac-84d4-8e3c82d04e33/octavia-api/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.518019 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-srrcl_7da68717-3e7e-4652-be23-4662a4fcf3e8/init/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.667105 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-srrcl_7da68717-3e7e-4652-be23-4662a4fcf3e8/octavia-healthmanager/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.676242 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-v8g4q_443cff50-0670-48ec-9602-a49de3990b65/init/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.873017 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-v8g4q_443cff50-0670-48ec-9602-a49de3990b65/init/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.894540 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-v8g4q_443cff50-0670-48ec-9602-a49de3990b65/octavia-housekeeping/0.log" Nov 28 18:58:03 crc kubenswrapper[4909]: I1128 18:58:03.940864 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-fqbq9_44d76067-4398-4f0c-b08e-ace6ec764257/init/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.150689 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-fqbq9_44d76067-4398-4f0c-b08e-ace6ec764257/octavia-amphora-httpd/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.171061 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-fqbq9_44d76067-4398-4f0c-b08e-ace6ec764257/init/0.log" Nov 28 18:58:04 crc 
kubenswrapper[4909]: I1128 18:58:04.192569 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7r7s9_ccb4dffe-4790-45bb-ba07-e38a24c64022/init/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.607413 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7r7s9_ccb4dffe-4790-45bb-ba07-e38a24c64022/octavia-rsyslog/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.653874 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7r7s9_ccb4dffe-4790-45bb-ba07-e38a24c64022/init/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.671367 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-dbch8_0961e7b7-53da-4f50-813c-048957ce0256/init/0.log" Nov 28 18:58:04 crc kubenswrapper[4909]: I1128 18:58:04.912152 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-dbch8_0961e7b7-53da-4f50-813c-048957ce0256/init/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.018959 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fbee195d-c0c0-48d6-91d1-fc1d85e708b5/mysql-bootstrap/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.023975 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-dbch8_0961e7b7-53da-4f50-813c-048957ce0256/octavia-worker/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.189032 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fbee195d-c0c0-48d6-91d1-fc1d85e708b5/mysql-bootstrap/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.214468 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fbee195d-c0c0-48d6-91d1-fc1d85e708b5/galera/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.260408 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e040cc04-ec87-429f-86b4-37fc9aa86fb1/mysql-bootstrap/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.478861 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e040cc04-ec87-429f-86b4-37fc9aa86fb1/mysql-bootstrap/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.483865 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_ddeeec31-082e-4934-a8e0-5fd8c5beaf81/openstackclient/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.487902 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e040cc04-ec87-429f-86b4-37fc9aa86fb1/galera/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.720394 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-6k4kn_e76f930f-b286-4682-91d2-20a57d3be765/openstack-network-exporter/0.log" Nov 28 18:58:05 crc kubenswrapper[4909]: I1128 18:58:05.767853 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-b7xg2_f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a/ovsdb-server-init/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.030930 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-b7xg2_f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a/ovsdb-server-init/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.031939 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-b7xg2_f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a/ovs-vswitchd/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.113123 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-b7xg2_f579a06d-9f20-4fa2-aaaa-9cdfd4b82a9a/ovsdb-server/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.304583 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8b509e35-cec7-4668-8421-07678240db3b/openstack-network-exporter/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.307900 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-vgwmt_6c05fa8e-ecd9-477f-bf04-c67ca51f425d/ovn-controller/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.322924 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8b509e35-cec7-4668-8421-07678240db3b/ovn-northd/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.585054 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-qgjl5_c51d51e8-794d-433f-940e-5d12bff09057/ovn-openstack-openstack-cell1/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.589928 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_425f39fc-d77b-4a11-b79e-30ae4dbad3c9/openstack-network-exporter/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.686821 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_425f39fc-d77b-4a11-b79e-30ae4dbad3c9/ovsdbserver-nb/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.779056 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_8ae6c4b3-7956-4597-b15b-de51255c1273/openstack-network-exporter/0.log" Nov 28 18:58:06 crc kubenswrapper[4909]: I1128 18:58:06.799084 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_8ae6c4b3-7956-4597-b15b-de51255c1273/ovsdbserver-nb/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.021645 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_01dcb944-56d9-4239-9050-a85af687e4af/ovsdbserver-nb/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.033975 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_01dcb944-56d9-4239-9050-a85af687e4af/openstack-network-exporter/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.129648 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_81b7dd2b-5e9c-454b-a12f-584bad52d211/openstack-network-exporter/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.225545 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_81b7dd2b-5e9c-454b-a12f-584bad52d211/ovsdbserver-sb/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.313433 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_0307a9d2-1a13-4d90-a52a-206ef5a80f4b/openstack-network-exporter/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.428449 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_0307a9d2-1a13-4d90-a52a-206ef5a80f4b/ovsdbserver-sb/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.505671 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-2_501619a3-d9ee-4809-9fe6-1e0170341225/openstack-network-exporter/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.618643 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_501619a3-d9ee-4809-9fe6-1e0170341225/ovsdbserver-sb/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.749526 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bd68f754d-s54pf_a1650cdd-5e8c-4c95-b414-2931314725d2/placement-api/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.757305 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bd68f754d-s54pf_a1650cdd-5e8c-4c95-b414-2931314725d2/placement-log/0.log" Nov 28 18:58:07 crc kubenswrapper[4909]: I1128 18:58:07.948130 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cnv78l_2e07ff4b-3800-4921-83fb-4b1da482a8b0/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Nov 28 18:58:08 crc kubenswrapper[4909]: I1128 18:58:08.047757 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_873236a5-5fa6-43c8-a44f-cbb590ff9bdc/init-config-reloader/0.log" Nov 28 18:58:08 crc kubenswrapper[4909]: I1128 18:58:08.147491 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_873236a5-5fa6-43c8-a44f-cbb590ff9bdc/config-reloader/0.log" Nov 28 18:58:08 crc kubenswrapper[4909]: I1128 18:58:08.182916 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_873236a5-5fa6-43c8-a44f-cbb590ff9bdc/init-config-reloader/0.log" Nov 28 18:58:08 crc kubenswrapper[4909]: I1128 18:58:08.225614 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_873236a5-5fa6-43c8-a44f-cbb590ff9bdc/prometheus/0.log" Nov 28 18:58:08 crc kubenswrapper[4909]: I1128 18:58:08.260955 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_873236a5-5fa6-43c8-a44f-cbb590ff9bdc/thanos-sidecar/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.073246 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c5cefad2-7466-4c21-a977-bde45a4a0346/setup-container/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.250339 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c5cefad2-7466-4c21-a977-bde45a4a0346/setup-container/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.262915 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c5cefad2-7466-4c21-a977-bde45a4a0346/rabbitmq/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.299077 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_01d2e8aa-55af-4fa5-98f4-c176b2701770/setup-container/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.528409 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-8zfjh_7b2328b5-fd31-4da6-bd22-401dee11788e/reboot-os-openstack-openstack-cell1/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.558300 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_01d2e8aa-55af-4fa5-98f4-c176b2701770/setup-container/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.754269 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-lmwjv_20cfff51-0619-4abf-9897-6c4add02ace1/run-os-openstack-openstack-cell1/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.760333 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_01d2e8aa-55af-4fa5-98f4-c176b2701770/rabbitmq/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.950246 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-9xrx8_6e7eacb2-3379-4878-bc25-b818620f471f/ssh-known-hosts-openstack/0.log" Nov 28 18:58:09 crc kubenswrapper[4909]: I1128 18:58:09.957437 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8144b38e-f654-47a8-bac0-757cb44d606c/memcached/0.log" Nov 28 18:58:10 crc kubenswrapper[4909]: I1128 18:58:10.033740 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-dmmb6_0384aec0-25b2-49fd-b0d8-9a426a60005c/telemetry-openstack-openstack-cell1/0.log" Nov 28 18:58:10 crc kubenswrapper[4909]: I1128 18:58:10.204608 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-zw85w_ff7c1f2c-beb4-45ed-9b2b-1326cbdb8b54/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Nov 28 18:58:10 crc kubenswrapper[4909]: I1128 18:58:10.249902 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-vlmx8_ebd37bf1-01b3-444a-83c4-59ee7f35c529/validate-network-openstack-openstack-cell1/0.log" Nov 28 18:58:19 crc kubenswrapper[4909]: I1128 18:58:19.911389 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:58:19 crc kubenswrapper[4909]: I1128 18:58:19.911981 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:58:32 crc kubenswrapper[4909]: I1128 18:58:32.959528 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/util/0.log" Nov 28 18:58:33 crc kubenswrapper[4909]: I1128 18:58:33.699266 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/util/0.log" Nov 28 18:58:33 crc kubenswrapper[4909]: I1128 18:58:33.774744 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/pull/0.log" Nov 28 18:58:33 crc kubenswrapper[4909]: I1128 18:58:33.815974 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/pull/0.log" Nov 28 18:58:33 crc kubenswrapper[4909]: I1128 18:58:33.992403 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/pull/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.050361 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/extract/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.090719 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f885m9n5_f1e08c93-a869-4fc1-a497-681cb10fdab3/util/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.210515 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-v8ggq_facf7553-0ba6-43b6-b720-0345f63d5706/kube-rbac-proxy/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.335406 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-cdlxz_5e7d8f53-01bb-407c-8897-ceff90567e28/kube-rbac-proxy/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.397572 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-v8ggq_facf7553-0ba6-43b6-b720-0345f63d5706/manager/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.691457 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-cdlxz_5e7d8f53-01bb-407c-8897-ceff90567e28/manager/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.775707 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-52zmw_5fbd4b3d-b059-4780-b1f1-f04e00a9a90e/kube-rbac-proxy/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.793649 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-52zmw_5fbd4b3d-b059-4780-b1f1-f04e00a9a90e/manager/0.log" Nov 28 18:58:34 crc kubenswrapper[4909]: I1128 18:58:34.978158 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-ggchx_4081f6af-5e99-4383-b12a-654d6c1419d8/kube-rbac-proxy/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.211647 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-ggchx_4081f6af-5e99-4383-b12a-654d6c1419d8/manager/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.316629 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-k5bt2_5efbc774-20bd-4b16-a9dd-2584462dad47/kube-rbac-proxy/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.531696 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-zjfzk_906bccad-3e05-4f36-9ecc-57627d2fb226/manager/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.548827 
4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-k5bt2_5efbc774-20bd-4b16-a9dd-2584462dad47/manager/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.567078 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-zjfzk_906bccad-3e05-4f36-9ecc-57627d2fb226/kube-rbac-proxy/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.731554 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7z5zf_d4d007c0-4c50-4550-b0ec-829717b7aa37/kube-rbac-proxy/0.log" Nov 28 18:58:35 crc kubenswrapper[4909]: I1128 18:58:35.947143 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-zqxvl_8b35e013-9a3e-4434-9603-e7fbd95f2dca/kube-rbac-proxy/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.007753 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-zqxvl_8b35e013-9a3e-4434-9603-e7fbd95f2dca/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.071689 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7z5zf_d4d007c0-4c50-4550-b0ec-829717b7aa37/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.203450 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-wsrnd_f5f666e9-9715-444d-a4ec-f6b3bb719df6/kube-rbac-proxy/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.403466 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-wsrnd_f5f666e9-9715-444d-a4ec-f6b3bb719df6/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.460305 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-t5cvm_735f57d0-1df6-4773-9e9f-2f2745d307d0/kube-rbac-proxy/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.482432 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-t5cvm_735f57d0-1df6-4773-9e9f-2f2745d307d0/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.630942 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-5r95f_b8299ad0-0617-430d-9b0f-066022a5679f/kube-rbac-proxy/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.684633 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-5r95f_b8299ad0-0617-430d-9b0f-066022a5679f/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.762118 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-sqpxn_ede8d830-fccd-4337-b0ec-48030a263d44/kube-rbac-proxy/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: I1128 18:58:36.833859 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-sqpxn_ede8d830-fccd-4337-b0ec-48030a263d44/manager/0.log" Nov 28 18:58:36 crc kubenswrapper[4909]: 
I1128 18:58:36.920036 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-xnnf4_dc2bc53b-f803-4f9b-943f-e53f132cbb39/kube-rbac-proxy/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.078607 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-5mfq8_32bd97d0-184a-4f85-bfdc-1b34688753a5/kube-rbac-proxy/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.084480 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-xnnf4_dc2bc53b-f803-4f9b-943f-e53f132cbb39/manager/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.174746 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-5mfq8_32bd97d0-184a-4f85-bfdc-1b34688753a5/manager/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.253888 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw_f7de33ac-fc6f-4c0c-a22a-0d9919ac6212/kube-rbac-proxy/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.304605 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4hqcvw_f7de33ac-fc6f-4c0c-a22a-0d9919ac6212/manager/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.572696 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-prsdg_4d9f796e-bf71-47e0-a15b-13f652dd03b5/registry-server/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.691113 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6fcddf5ccf-lkhfm_bdf3a989-5fba-461f-b8ed-82ed0bd1ece7/operator/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.843401 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-x28l4_108b7264-8189-4234-a458-ba48a0c28123/kube-rbac-proxy/0.log" Nov 28 18:58:37 crc kubenswrapper[4909]: I1128 18:58:37.953682 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-jlztw_5a4d9dc0-67a3-4014-9265-f88dac783bca/kube-rbac-proxy/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.009824 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-x28l4_108b7264-8189-4234-a458-ba48a0c28123/manager/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.172504 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-jlztw_5a4d9dc0-67a3-4014-9265-f88dac783bca/manager/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.181127 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rrfhv_626fb166-d7e7-424e-bcde-08e77d0c54b1/operator/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.390957 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-hbnt7_9024361d-e7dd-4f11-b22f-4027e31bd0ae/kube-rbac-proxy/0.log" Nov 28 18:58:38 crc 
kubenswrapper[4909]: I1128 18:58:38.436916 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-hbnt7_9024361d-e7dd-4f11-b22f-4027e31bd0ae/manager/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.544425 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-9vnqw_771c530f-d0ab-412d-a6c6-931999bc878f/kube-rbac-proxy/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.689281 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-w8bg4_584e895a-63c3-48db-8207-e89bc9396da7/kube-rbac-proxy/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.765549 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-w8bg4_584e895a-63c3-48db-8207-e89bc9396da7/manager/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.858113 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-9vnqw_771c530f-d0ab-412d-a6c6-931999bc878f/manager/0.log" Nov 28 18:58:38 crc kubenswrapper[4909]: I1128 18:58:38.952081 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-b8ssv_3127edf8-24b9-4170-b677-c625926881a5/kube-rbac-proxy/0.log" Nov 28 18:58:39 crc kubenswrapper[4909]: I1128 18:58:39.001707 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-b8ssv_3127edf8-24b9-4170-b677-c625926881a5/manager/0.log" Nov 28 18:58:39 crc kubenswrapper[4909]: I1128 18:58:39.553122 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-78d5d44766-bqs6f_4ed1d030-fe78-4211-a585-46c8ae4f419d/manager/0.log" Nov 28 18:58:49 crc kubenswrapper[4909]: I1128 18:58:49.911166 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:58:49 crc kubenswrapper[4909]: I1128 18:58:49.911847 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:58:49 crc kubenswrapper[4909]: I1128 18:58:49.914345 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" Nov 28 18:58:49 crc kubenswrapper[4909]: I1128 18:58:49.915409 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:58:49 crc kubenswrapper[4909]: I1128 18:58:49.915514 4909 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e" gracePeriod=600 Nov 28 18:58:50 crc kubenswrapper[4909]: I1128 18:58:50.085867 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e" exitCode=0 Nov 28 18:58:50 crc kubenswrapper[4909]: I1128 18:58:50.085906 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e"} Nov 28 18:58:50 crc kubenswrapper[4909]: I1128 18:58:50.085945 4909 scope.go:117] "RemoveContainer" containerID="141877a9e47864adeab54e6b62e3e3588ad770da6cc4187b344c2cdac9b50495" Nov 28 18:58:51 crc kubenswrapper[4909]: I1128 18:58:51.096729 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerStarted","Data":"04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"} Nov 28 18:59:00 crc kubenswrapper[4909]: I1128 18:59:00.660852 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-87jr2_ebbacd25-74d0-422d-a0b1-51bb64a57468/control-plane-machine-set-operator/0.log" Nov 28 18:59:00 crc kubenswrapper[4909]: I1128 18:59:00.778162 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bp2ss_53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f/kube-rbac-proxy/0.log" Nov 28 18:59:00 crc kubenswrapper[4909]: I1128 18:59:00.846789 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bp2ss_53ff20ea-24c0-4364-9bbb-6ef3a48fdd6f/machine-api-operator/0.log" Nov 28 18:59:14 crc kubenswrapper[4909]: I1128 18:59:14.722602 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-xk9wg_4c9b7c14-6c6d-4e55-bacc-1e2f3309e069/cert-manager-controller/0.log" Nov 28 18:59:14 crc kubenswrapper[4909]: I1128 18:59:14.952366 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-274mv_37a96393-8175-4ec6-bf57-d02fd13cc257/cert-manager-cainjector/0.log" Nov 28 18:59:14 crc kubenswrapper[4909]: I1128 18:59:14.992269 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-tl4xj_6bcf5976-b2b7-47b5-aa2a-7010dff6a42c/cert-manager-webhook/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.398436 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-pfpfn_622cee50-946d-447b-b403-dba09b89346c/nmstate-console-plugin/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.435599 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-bk9bk_7c2c98c8-3835-4a16-b57b-6a67e455b7e3/nmstate-handler/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.579233 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-t2dgs_1aaa9676-48ce-4f8e-af9d-f683a8432515/kube-rbac-proxy/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.635742 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-t2dgs_1aaa9676-48ce-4f8e-af9d-f683a8432515/nmstate-metrics/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.791691 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-vs6g7_8b711a0a-46e9-4bee-b916-2333b5800592/nmstate-operator/0.log" Nov 28 18:59:31 crc kubenswrapper[4909]: I1128 18:59:31.855731 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-9m5xv_bfaa4c0e-b2c7-4768-b261-87a0ef5696d6/nmstate-webhook/0.log" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.502877 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 18:59:46 crc kubenswrapper[4909]: E1128 18:59:46.504234 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="registry-server" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504254 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="registry-server" Nov 28 18:59:46 crc kubenswrapper[4909]: E1128 18:59:46.504307 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="extract-utilities" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504322 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="extract-utilities" Nov 28 18:59:46 crc kubenswrapper[4909]: E1128 18:59:46.504348 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b22699ac-b671-4b93-a53f-4493984f5874" containerName="container-00" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504393 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="b22699ac-b671-4b93-a53f-4493984f5874" containerName="container-00" Nov 28 18:59:46 crc kubenswrapper[4909]: E1128 18:59:46.504431 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="extract-content" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504442 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="extract-content" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504808 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="20ddb6bd-21eb-4744-b3a3-ef28ae63c7cb" containerName="registry-server" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.504854 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="b22699ac-b671-4b93-a53f-4493984f5874" containerName="container-00" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.507680 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.515460 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.701973 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.702351 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.702596 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf6wr\" (UniqueName: \"kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.804309 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.804385 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.804478 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf6wr\" (UniqueName: \"kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.805120 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.805182 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.831188 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hf6wr\" (UniqueName: \"kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr\") pod \"certified-operators-5pqtc\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:46 crc kubenswrapper[4909]: I1128 18:59:46.846872 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:47 crc kubenswrapper[4909]: I1128 18:59:47.443590 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 18:59:47 crc kubenswrapper[4909]: I1128 18:59:47.709039 4909 generic.go:334] "Generic (PLEG): container finished" podID="f483215b-7e2e-4c48-af93-f049127051c7" containerID="be5f5e9b8aa88291e45ee074b1f81a9e4a785664cadff48ac89b60f80880a506" exitCode=0 Nov 28 18:59:47 crc kubenswrapper[4909]: I1128 18:59:47.709082 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerDied","Data":"be5f5e9b8aa88291e45ee074b1f81a9e4a785664cadff48ac89b60f80880a506"} Nov 28 18:59:47 crc kubenswrapper[4909]: I1128 18:59:47.709107 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerStarted","Data":"feb13fe3680bc53508f3830b053df88eb62761c64b5ae019bd0f45766121e94b"} Nov 28 18:59:47 crc kubenswrapper[4909]: I1128 18:59:47.711278 4909 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:59:48 crc kubenswrapper[4909]: I1128 18:59:48.927596 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-vcxs6_4f88a2f5-e41e-4832-94e4-1d5e190ccdd4/kube-rbac-proxy/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.149562 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-frr-files/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.376482 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-vcxs6_4f88a2f5-e41e-4832-94e4-1d5e190ccdd4/controller/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.618921 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-frr-files/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.618964 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-reloader/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.653345 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-metrics/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.698105 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-reloader/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.726771 4909 generic.go:334] "Generic (PLEG): container finished" podID="f483215b-7e2e-4c48-af93-f049127051c7" containerID="eeafe7dbc4269789966d790b0e78d60e4bb6908275c533cef02f328f662d185e" exitCode=0 Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 
18:59:49.726810 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerDied","Data":"eeafe7dbc4269789966d790b0e78d60e4bb6908275c533cef02f328f662d185e"} Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.855134 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-metrics/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.885428 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-reloader/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.896106 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-frr-files/0.log" Nov 28 18:59:49 crc kubenswrapper[4909]: I1128 18:59:49.908060 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-metrics/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.075873 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-frr-files/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.096272 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-reloader/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.113297 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/controller/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.127609 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/cp-metrics/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.243093 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/frr-metrics/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.322191 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/kube-rbac-proxy/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.325074 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/kube-rbac-proxy-frr/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.515945 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/reloader/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.630552 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-6w4xm_802e20e0-dcbc-4743-9e54-dad0045f1e64/frr-k8s-webhook-server/0.log" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.737994 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerStarted","Data":"e41258e98b5ca06ebe79df3e293fc6b1f3844f243de887bd37ca29d55d2f03ec"} Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.755010 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-5pqtc" podStartSLOduration=2.230276938 podStartE2EDuration="4.754995004s" podCreationTimestamp="2025-11-28 18:59:46 +0000 UTC" firstStartedPulling="2025-11-28 18:59:47.711052933 +0000 UTC m=+10170.107737457" lastFinishedPulling="2025-11-28 18:59:50.235770999 +0000 UTC m=+10172.632455523" observedRunningTime="2025-11-28 18:59:50.750681288 +0000 UTC m=+10173.147365822" watchObservedRunningTime="2025-11-28 18:59:50.754995004 +0000 UTC m=+10173.151679528" Nov 28 18:59:50 crc kubenswrapper[4909]: I1128 18:59:50.879796 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7ff45df9c5-whssd_393afd2e-0377-4085-a367-7ad40c67b6a5/manager/0.log" Nov 28 18:59:51 crc kubenswrapper[4909]: I1128 18:59:51.094920 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6d85d4dbb-jr45x_98c039de-9587-4f9e-989f-f46662bfec99/webhook-server/0.log" Nov 28 18:59:51 crc kubenswrapper[4909]: I1128 18:59:51.181444 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x27f8_7bfa82af-4dc2-4b43-a682-c6b4e9fedde7/kube-rbac-proxy/0.log" Nov 28 18:59:52 crc kubenswrapper[4909]: I1128 18:59:52.107266 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x27f8_7bfa82af-4dc2-4b43-a682-c6b4e9fedde7/speaker/0.log" Nov 28 18:59:53 crc kubenswrapper[4909]: I1128 18:59:53.983422 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-kh8w8_72616f0a-8071-4cc3-a5bb-0c226dcbf877/frr/0.log" Nov 28 18:59:56 crc kubenswrapper[4909]: I1128 18:59:56.851237 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:56 crc kubenswrapper[4909]: I1128 18:59:56.852192 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:56 crc kubenswrapper[4909]: I1128 18:59:56.925545 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:57 crc kubenswrapper[4909]: I1128 18:59:57.926607 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 18:59:57 crc kubenswrapper[4909]: I1128 18:59:57.988194 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 18:59:59 crc kubenswrapper[4909]: I1128 18:59:59.870419 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5pqtc" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="registry-server" containerID="cri-o://e41258e98b5ca06ebe79df3e293fc6b1f3844f243de887bd37ca29d55d2f03ec" gracePeriod=2 Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.239423 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4"] Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.241269 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.244337 4909 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.244390 4909 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.257878 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4"] Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.377808 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.377887 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.377966 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gfq9\" (UniqueName: \"kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.483942 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.484032 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.484102 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gfq9\" (UniqueName: \"kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.485892 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume\") pod 
\"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.877958 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.880284 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gfq9\" (UniqueName: \"kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9\") pod \"collect-profiles-29405940-fgxx4\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.883061 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.895587 4909 generic.go:334] "Generic (PLEG): container finished" podID="f483215b-7e2e-4c48-af93-f049127051c7" containerID="e41258e98b5ca06ebe79df3e293fc6b1f3844f243de887bd37ca29d55d2f03ec" exitCode=0 Nov 28 19:00:00 crc kubenswrapper[4909]: I1128 19:00:00.895702 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerDied","Data":"e41258e98b5ca06ebe79df3e293fc6b1f3844f243de887bd37ca29d55d2f03ec"} Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.012976 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.098979 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hf6wr\" (UniqueName: \"kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr\") pod \"f483215b-7e2e-4c48-af93-f049127051c7\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.099084 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content\") pod \"f483215b-7e2e-4c48-af93-f049127051c7\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.099117 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities\") pod \"f483215b-7e2e-4c48-af93-f049127051c7\" (UID: \"f483215b-7e2e-4c48-af93-f049127051c7\") " Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.100625 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities" (OuterVolumeSpecName: "utilities") pod "f483215b-7e2e-4c48-af93-f049127051c7" (UID: "f483215b-7e2e-4c48-af93-f049127051c7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.106955 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr" (OuterVolumeSpecName: "kube-api-access-hf6wr") pod "f483215b-7e2e-4c48-af93-f049127051c7" (UID: "f483215b-7e2e-4c48-af93-f049127051c7"). InnerVolumeSpecName "kube-api-access-hf6wr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.157329 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f483215b-7e2e-4c48-af93-f049127051c7" (UID: "f483215b-7e2e-4c48-af93-f049127051c7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.200623 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.200666 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hf6wr\" (UniqueName: \"kubernetes.io/projected/f483215b-7e2e-4c48-af93-f049127051c7-kube-api-access-hf6wr\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.200677 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f483215b-7e2e-4c48-af93-f049127051c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.400964 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4"] Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.906176 4909 generic.go:334] "Generic (PLEG): container finished" podID="827176ec-b952-43c8-8479-cde48c6ae7a2" containerID="5aa62c8026f89a8a32671b0774619fd8138be9257a65257ee5a9509759d8dc32" exitCode=0 Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.909184 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5pqtc" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.912569 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" event={"ID":"827176ec-b952-43c8-8479-cde48c6ae7a2","Type":"ContainerDied","Data":"5aa62c8026f89a8a32671b0774619fd8138be9257a65257ee5a9509759d8dc32"} Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.912604 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" event={"ID":"827176ec-b952-43c8-8479-cde48c6ae7a2","Type":"ContainerStarted","Data":"e65bf5fbd1a2f09508c11162c2856fb6f1656d6492ead570f0705e80e0133f6f"} Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.912616 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pqtc" event={"ID":"f483215b-7e2e-4c48-af93-f049127051c7","Type":"ContainerDied","Data":"feb13fe3680bc53508f3830b053df88eb62761c64b5ae019bd0f45766121e94b"} Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.912637 4909 scope.go:117] "RemoveContainer" containerID="e41258e98b5ca06ebe79df3e293fc6b1f3844f243de887bd37ca29d55d2f03ec" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.961959 4909 scope.go:117] "RemoveContainer" containerID="eeafe7dbc4269789966d790b0e78d60e4bb6908275c533cef02f328f662d185e" Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.969278 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.979909 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5pqtc"] Nov 28 19:00:01 crc kubenswrapper[4909]: I1128 19:00:01.988615 4909 scope.go:117] "RemoveContainer" containerID="be5f5e9b8aa88291e45ee074b1f81a9e4a785664cadff48ac89b60f80880a506" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.406239 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.457919 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gfq9\" (UniqueName: \"kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9\") pod \"827176ec-b952-43c8-8479-cde48c6ae7a2\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.458006 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume\") pod \"827176ec-b952-43c8-8479-cde48c6ae7a2\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.458360 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume\") pod \"827176ec-b952-43c8-8479-cde48c6ae7a2\" (UID: \"827176ec-b952-43c8-8479-cde48c6ae7a2\") " Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.459087 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume" (OuterVolumeSpecName: "config-volume") pod "827176ec-b952-43c8-8479-cde48c6ae7a2" (UID: "827176ec-b952-43c8-8479-cde48c6ae7a2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.473452 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9" (OuterVolumeSpecName: "kube-api-access-7gfq9") pod "827176ec-b952-43c8-8479-cde48c6ae7a2" (UID: "827176ec-b952-43c8-8479-cde48c6ae7a2"). InnerVolumeSpecName "kube-api-access-7gfq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.473491 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "827176ec-b952-43c8-8479-cde48c6ae7a2" (UID: "827176ec-b952-43c8-8479-cde48c6ae7a2"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.560412 4909 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/827176ec-b952-43c8-8479-cde48c6ae7a2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.560617 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gfq9\" (UniqueName: \"kubernetes.io/projected/827176ec-b952-43c8-8479-cde48c6ae7a2-kube-api-access-7gfq9\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.560719 4909 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/827176ec-b952-43c8-8479-cde48c6ae7a2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.915188 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f483215b-7e2e-4c48-af93-f049127051c7" path="/var/lib/kubelet/pods/f483215b-7e2e-4c48-af93-f049127051c7/volumes" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.939895 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" event={"ID":"827176ec-b952-43c8-8479-cde48c6ae7a2","Type":"ContainerDied","Data":"e65bf5fbd1a2f09508c11162c2856fb6f1656d6492ead570f0705e80e0133f6f"} Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.939930 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e65bf5fbd1a2f09508c11162c2856fb6f1656d6492ead570f0705e80e0133f6f" Nov 28 19:00:03 crc kubenswrapper[4909]: I1128 19:00:03.939984 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405940-fgxx4" Nov 28 19:00:04 crc kubenswrapper[4909]: I1128 19:00:04.500742 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf"] Nov 28 19:00:04 crc kubenswrapper[4909]: I1128 19:00:04.509156 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-66ndf"] Nov 28 19:00:05 crc kubenswrapper[4909]: I1128 19:00:05.927778 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="953283bf-e833-41ce-83f3-2dc48ae6f291" path="/var/lib/kubelet/pods/953283bf-e833-41ce-83f3-2dc48ae6f291/volumes" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.449743 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/util/0.log" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.610494 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/util/0.log" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.657448 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/pull/0.log" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.727216 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/pull/0.log" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.884282 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/extract/0.log" Nov 28 19:00:06 crc kubenswrapper[4909]: I1128 19:00:06.887320 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/pull/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.121457 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah4rkz_4148c1c6-142b-41bb-9607-7d391b4cc45d/util/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.273714 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/util/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.458552 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/pull/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.459796 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/util/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.491369 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/pull/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.637603 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/pull/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.676521 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/util/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.701339 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fqvnvh_d0a72a3a-dca8-4920-a505-fe8bed189e5e/extract/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.829407 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/util/0.log" Nov 28 19:00:07 crc kubenswrapper[4909]: I1128 19:00:07.996871 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.020242 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/util/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.081649 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.237935 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/util/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.247759 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.294549 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92104cfhz_b31728fc-2e37-4b7c-9dd4-3d41a22bc00d/extract/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.452928 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/util/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.622337 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.634030 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.635507 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/util/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.781320 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/util/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.807578 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/extract/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.844627 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83kmlm2_def73bb6-09ff-4074-8213-4962def55a10/pull/0.log" Nov 28 19:00:08 crc kubenswrapper[4909]: I1128 19:00:08.969235 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-utilities/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.165772 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-content/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.196002 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-content/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.210846 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-utilities/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.436735 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-utilities/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.452836 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/extract-content/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.678524 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-utilities/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.898723 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-utilities/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.930438 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-content/0.log" Nov 28 19:00:09 crc kubenswrapper[4909]: I1128 19:00:09.953672 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-content/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.164634 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-utilities/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.170599 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5mznf_fcc8fe11-6ac1-4989-9cc2-ae822b108786/registry-server/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.235208 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/extract-content/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.362521 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-nggsh_367a78b1-06bc-48a7-ad2b-4e825e5f683f/marketplace-operator/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.453489 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5zjlm_774f9f72-67d9-487a-89d9-08fed5aeea79/registry-server/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.472838 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-utilities/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.608134 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-utilities/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.638541 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-content/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.657197 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-content/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.869183 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-utilities/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.871555 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/extract-content/0.log" Nov 28 19:00:10 crc kubenswrapper[4909]: I1128 19:00:10.936041 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-utilities/0.log" Nov 28 19:00:11 crc kubenswrapper[4909]: I1128 19:00:11.061373 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-zvqgx_2d23c6e1-8623-482d-aef1-4367ab709c41/registry-server/0.log" Nov 28 19:00:11 crc kubenswrapper[4909]: I1128 19:00:11.711508 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-content/0.log" Nov 28 19:00:11 crc kubenswrapper[4909]: I1128 19:00:11.765975 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-utilities/0.log" Nov 28 19:00:11 crc kubenswrapper[4909]: I1128 19:00:11.868942 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-content/0.log" Nov 28 19:00:12 crc kubenswrapper[4909]: I1128 19:00:12.030608 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-utilities/0.log" Nov 28 19:00:12 crc kubenswrapper[4909]: I1128 19:00:12.170587 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/extract-content/0.log" Nov 28 19:00:13 crc kubenswrapper[4909]: I1128 19:00:13.446606 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-grq6g_ce9fe3b1-d4b1-4de1-843a-976216847bda/registry-server/0.log" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.113621 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-58vmj"] Nov 28 19:00:20 crc kubenswrapper[4909]: E1128 19:00:20.115072 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="registry-server" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115094 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="registry-server" Nov 28 19:00:20 crc kubenswrapper[4909]: E1128 19:00:20.115134 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="extract-utilities" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115148 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="extract-utilities" Nov 28 19:00:20 crc kubenswrapper[4909]: E1128 19:00:20.115179 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827176ec-b952-43c8-8479-cde48c6ae7a2" containerName="collect-profiles" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115194 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="827176ec-b952-43c8-8479-cde48c6ae7a2" containerName="collect-profiles" Nov 28 19:00:20 crc kubenswrapper[4909]: E1128 19:00:20.115231 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="extract-content" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115244 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="extract-content" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115753 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f483215b-7e2e-4c48-af93-f049127051c7" containerName="registry-server" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.115780 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="827176ec-b952-43c8-8479-cde48c6ae7a2" containerName="collect-profiles" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.118775 4909 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.128150 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58vmj"] Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.319630 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwxvc\" (UniqueName: \"kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.319824 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.319868 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.421233 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.421314 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwxvc\" (UniqueName: \"kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.421528 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.422094 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.422393 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.455257 4909 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gwxvc\" (UniqueName: \"kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc\") pod \"community-operators-58vmj\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:20 crc kubenswrapper[4909]: I1128 19:00:20.745821 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:21 crc kubenswrapper[4909]: I1128 19:00:21.214144 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58vmj"] Nov 28 19:00:22 crc kubenswrapper[4909]: I1128 19:00:22.132211 4909 generic.go:334] "Generic (PLEG): container finished" podID="90b88669-adf2-4956-80eb-3d615d21c242" containerID="78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b" exitCode=0 Nov 28 19:00:22 crc kubenswrapper[4909]: I1128 19:00:22.132270 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerDied","Data":"78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b"} Nov 28 19:00:22 crc kubenswrapper[4909]: I1128 19:00:22.132505 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerStarted","Data":"f56f4f2b4b4b593a956db63d78ce9d2d14129fb2a81572cb0ee4f0354e322d50"} Nov 28 19:00:24 crc kubenswrapper[4909]: I1128 19:00:24.161294 4909 generic.go:334] "Generic (PLEG): container finished" podID="90b88669-adf2-4956-80eb-3d615d21c242" containerID="4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce" exitCode=0 Nov 28 19:00:24 crc kubenswrapper[4909]: I1128 19:00:24.161378 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerDied","Data":"4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce"} Nov 28 19:00:26 crc kubenswrapper[4909]: I1128 19:00:26.189543 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerStarted","Data":"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7"} Nov 28 19:00:26 crc kubenswrapper[4909]: I1128 19:00:26.215813 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-58vmj" podStartSLOduration=3.492788558 podStartE2EDuration="6.215795557s" podCreationTimestamp="2025-11-28 19:00:20 +0000 UTC" firstStartedPulling="2025-11-28 19:00:22.137046854 +0000 UTC m=+10204.533731388" lastFinishedPulling="2025-11-28 19:00:24.860053823 +0000 UTC m=+10207.256738387" observedRunningTime="2025-11-28 19:00:26.207028782 +0000 UTC m=+10208.603713306" watchObservedRunningTime="2025-11-28 19:00:26.215795557 +0000 UTC m=+10208.612480081" Nov 28 19:00:28 crc kubenswrapper[4909]: I1128 19:00:28.079098 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-q8sth_a5271a09-d325-4edf-838a-52dd9a124eba/prometheus-operator/0.log" Nov 28 19:00:28 crc kubenswrapper[4909]: I1128 19:00:28.198939 4909 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6bf745c75-hpn6w_981108b4-6d9a-421c-8e5b-8ae7d1643fc4/prometheus-operator-admission-webhook/0.log" Nov 28 19:00:28 crc kubenswrapper[4909]: I1128 19:00:28.241297 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6bf745c75-ssnwd_51f0f5a0-6592-4651-9483-cde6c0fd1afe/prometheus-operator-admission-webhook/0.log" Nov 28 19:00:28 crc kubenswrapper[4909]: I1128 19:00:28.363881 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-hzmm9_7a779e0e-c999-4317-8315-a950ae29cd23/operator/0.log" Nov 28 19:00:28 crc kubenswrapper[4909]: I1128 19:00:28.426464 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-xk4qq_68065799-9c6f-4fac-9aa3-2dfc97bfca78/perses-operator/0.log" Nov 28 19:00:30 crc kubenswrapper[4909]: I1128 19:00:30.747666 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:30 crc kubenswrapper[4909]: I1128 19:00:30.748120 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:30 crc kubenswrapper[4909]: I1128 19:00:30.845338 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:31 crc kubenswrapper[4909]: I1128 19:00:31.332329 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:31 crc kubenswrapper[4909]: I1128 19:00:31.402930 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58vmj"] Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.280627 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-58vmj" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="registry-server" containerID="cri-o://d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7" gracePeriod=2 Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.837379 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.950081 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities\") pod \"90b88669-adf2-4956-80eb-3d615d21c242\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.950522 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content\") pod \"90b88669-adf2-4956-80eb-3d615d21c242\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.950574 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwxvc\" (UniqueName: \"kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc\") pod \"90b88669-adf2-4956-80eb-3d615d21c242\" (UID: \"90b88669-adf2-4956-80eb-3d615d21c242\") " Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.950817 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities" (OuterVolumeSpecName: "utilities") pod "90b88669-adf2-4956-80eb-3d615d21c242" (UID: "90b88669-adf2-4956-80eb-3d615d21c242"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.951111 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.955964 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc" (OuterVolumeSpecName: "kube-api-access-gwxvc") pod "90b88669-adf2-4956-80eb-3d615d21c242" (UID: "90b88669-adf2-4956-80eb-3d615d21c242"). InnerVolumeSpecName "kube-api-access-gwxvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 19:00:33 crc kubenswrapper[4909]: I1128 19:00:33.999638 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90b88669-adf2-4956-80eb-3d615d21c242" (UID: "90b88669-adf2-4956-80eb-3d615d21c242"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.052734 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90b88669-adf2-4956-80eb-3d615d21c242-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.052765 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwxvc\" (UniqueName: \"kubernetes.io/projected/90b88669-adf2-4956-80eb-3d615d21c242-kube-api-access-gwxvc\") on node \"crc\" DevicePath \"\"" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.303967 4909 generic.go:334] "Generic (PLEG): container finished" podID="90b88669-adf2-4956-80eb-3d615d21c242" containerID="d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7" exitCode=0 Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.304010 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerDied","Data":"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7"} Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.304035 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58vmj" event={"ID":"90b88669-adf2-4956-80eb-3d615d21c242","Type":"ContainerDied","Data":"f56f4f2b4b4b593a956db63d78ce9d2d14129fb2a81572cb0ee4f0354e322d50"} Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.304052 4909 scope.go:117] "RemoveContainer" containerID="d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.304115 4909 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58vmj" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.327456 4909 scope.go:117] "RemoveContainer" containerID="4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.344643 4909 scope.go:117] "RemoveContainer" containerID="78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.400690 4909 scope.go:117] "RemoveContainer" containerID="d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7" Nov 28 19:00:34 crc kubenswrapper[4909]: E1128 19:00:34.402843 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7\": container with ID starting with d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7 not found: ID does not exist" containerID="d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.402872 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7"} err="failed to get container status \"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7\": rpc error: code = NotFound desc = could not find container \"d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7\": container with ID starting with d569c4a3615996e396b0a74cd02aac41b3268212e8af7ace02ae9cb557183bb7 not found: ID does not exist" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.402895 4909 scope.go:117] "RemoveContainer" containerID="4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce" Nov 28 19:00:34 crc kubenswrapper[4909]: E1128 19:00:34.403182 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce\": container with ID starting with 4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce not found: ID does not exist" containerID="4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.403200 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce"} err="failed to get container status \"4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce\": rpc error: code = NotFound desc = could not find container \"4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce\": container with ID starting with 4cac83692e8ee9f4371c6c227b7f0e4082c8477afca895a3c7fd500bcf85f4ce not found: ID does not exist" Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.403214 4909 scope.go:117] "RemoveContainer" containerID="78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b" Nov 28 19:00:34 crc kubenswrapper[4909]: E1128 19:00:34.403417 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b\": container with ID starting with 78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b not found: ID does not exist" containerID="78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b" 
Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.403436 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b"} err="failed to get container status \"78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b\": rpc error: code = NotFound desc = could not find container \"78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b\": container with ID starting with 78d838e7f1713bbb59cd8576b3f9d95a1a7c99f4d9b497da3d614f9b6d10bd1b not found: ID does not exist"
Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.407226 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58vmj"]
Nov 28 19:00:34 crc kubenswrapper[4909]: I1128 19:00:34.420731 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-58vmj"]
Nov 28 19:00:35 crc kubenswrapper[4909]: I1128 19:00:35.913135 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90b88669-adf2-4956-80eb-3d615d21c242" path="/var/lib/kubelet/pods/90b88669-adf2-4956-80eb-3d615d21c242/volumes"
Nov 28 19:00:41 crc kubenswrapper[4909]: I1128 19:00:41.187503 4909 scope.go:117] "RemoveContainer" containerID="fb0404ca80a40babf6731397718cef05613b665937aa9144733c8b25e5baa8b9"
Nov 28 19:00:50 crc kubenswrapper[4909]: E1128 19:00:50.837239 4909 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.53:38756->38.102.83.53:44787: write tcp 38.102.83.53:38756->38.102.83.53:44787: write: broken pipe
Nov 28 19:00:53 crc kubenswrapper[4909]: E1128 19:00:53.883236 4909 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.53:38858->38.102.83.53:44787: write tcp 38.102.83.53:38858->38.102.83.53:44787: write: broken pipe
Nov 28 19:00:56 crc kubenswrapper[4909]: E1128 19:00:56.861095 4909 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.53:38950->38.102.83.53:44787: read tcp 38.102.83.53:38950->38.102.83.53:44787: read: connection reset by peer
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.194451 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405941-f75bc"]
Nov 28 19:01:00 crc kubenswrapper[4909]: E1128 19:01:00.195376 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="extract-utilities"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.195388 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="extract-utilities"
Nov 28 19:01:00 crc kubenswrapper[4909]: E1128 19:01:00.195411 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="extract-content"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.195417 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="extract-content"
Nov 28 19:01:00 crc kubenswrapper[4909]: E1128 19:01:00.195438 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="registry-server"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.195445 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="registry-server"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.195628 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="90b88669-adf2-4956-80eb-3d615d21c242" containerName="registry-server"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.196359 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405941-f75bc"]
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.196432 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.319194 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp7ml\" (UniqueName: \"kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.319284 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.319311 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.319363 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.422493 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.422541 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.422632 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.422748 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp7ml\" (UniqueName: \"kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.430274 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.438352 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.443547 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.444174 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp7ml\" (UniqueName: \"kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml\") pod \"keystone-cron-29405941-f75bc\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") " pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:00 crc kubenswrapper[4909]: I1128 19:01:00.536930 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:01 crc kubenswrapper[4909]: I1128 19:01:01.017064 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405941-f75bc"]
Nov 28 19:01:01 crc kubenswrapper[4909]: W1128 19:01:01.038001 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf79fafa5_1017_41e5_ad6a_8d12a887e9e1.slice/crio-870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87 WatchSource:0}: Error finding container 870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87: Status 404 returned error can't find the container with id 870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87
Nov 28 19:01:01 crc kubenswrapper[4909]: I1128 19:01:01.603323 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405941-f75bc" event={"ID":"f79fafa5-1017-41e5-ad6a-8d12a887e9e1","Type":"ContainerStarted","Data":"d478f5e7186b3f1bda0cff56384cc1650034212267726b24ea4c42c54ede08d5"}
Nov 28 19:01:01 crc kubenswrapper[4909]: I1128 19:01:01.603770 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405941-f75bc" event={"ID":"f79fafa5-1017-41e5-ad6a-8d12a887e9e1","Type":"ContainerStarted","Data":"870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87"}
Nov 28 19:01:01 crc kubenswrapper[4909]: I1128 19:01:01.638488 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405941-f75bc" podStartSLOduration=1.6384556959999999 podStartE2EDuration="1.638455696s" podCreationTimestamp="2025-11-28 19:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 19:01:01.618184312 +0000 UTC m=+10244.014868866" watchObservedRunningTime="2025-11-28 19:01:01.638455696 +0000 UTC m=+10244.035140260"
Nov 28 19:01:03 crc kubenswrapper[4909]: I1128 19:01:03.625973 4909 generic.go:334] "Generic (PLEG): container finished" podID="f79fafa5-1017-41e5-ad6a-8d12a887e9e1" containerID="d478f5e7186b3f1bda0cff56384cc1650034212267726b24ea4c42c54ede08d5" exitCode=0
Nov 28 19:01:03 crc kubenswrapper[4909]: I1128 19:01:03.626096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405941-f75bc" event={"ID":"f79fafa5-1017-41e5-ad6a-8d12a887e9e1","Type":"ContainerDied","Data":"d478f5e7186b3f1bda0cff56384cc1650034212267726b24ea4c42c54ede08d5"}
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.062616 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.233780 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys\") pod \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") "
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.233916 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp7ml\" (UniqueName: \"kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml\") pod \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") "
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.235036 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data\") pod \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") "
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.235481 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle\") pod \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\" (UID: \"f79fafa5-1017-41e5-ad6a-8d12a887e9e1\") "
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.239973 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml" (OuterVolumeSpecName: "kube-api-access-gp7ml") pod "f79fafa5-1017-41e5-ad6a-8d12a887e9e1" (UID: "f79fafa5-1017-41e5-ad6a-8d12a887e9e1"). InnerVolumeSpecName "kube-api-access-gp7ml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.241390 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f79fafa5-1017-41e5-ad6a-8d12a887e9e1" (UID: "f79fafa5-1017-41e5-ad6a-8d12a887e9e1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.289578 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f79fafa5-1017-41e5-ad6a-8d12a887e9e1" (UID: "f79fafa5-1017-41e5-ad6a-8d12a887e9e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.318929 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data" (OuterVolumeSpecName: "config-data") pod "f79fafa5-1017-41e5-ad6a-8d12a887e9e1" (UID: "f79fafa5-1017-41e5-ad6a-8d12a887e9e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.338592 4909 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.338631 4909 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.338643 4909 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.338669 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp7ml\" (UniqueName: \"kubernetes.io/projected/f79fafa5-1017-41e5-ad6a-8d12a887e9e1-kube-api-access-gp7ml\") on node \"crc\" DevicePath \"\""
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.654096 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405941-f75bc" event={"ID":"f79fafa5-1017-41e5-ad6a-8d12a887e9e1","Type":"ContainerDied","Data":"870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87"}
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.654143 4909 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="870530999f66ae4539c971e0166db8743a001f3c518411da5996ae30b739cf87"
Nov 28 19:01:05 crc kubenswrapper[4909]: I1128 19:01:05.654215 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405941-f75bc"
Nov 28 19:01:19 crc kubenswrapper[4909]: I1128 19:01:19.911238 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 19:01:19 crc kubenswrapper[4909]: I1128 19:01:19.912106 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 19:01:49 crc kubenswrapper[4909]: I1128 19:01:49.910971 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 19:01:49 crc kubenswrapper[4909]: I1128 19:01:49.911495 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 19:02:19 crc kubenswrapper[4909]: I1128 19:02:19.911790 4909 patch_prober.go:28] interesting pod/machine-config-daemon-d5nd7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 19:02:19 crc kubenswrapper[4909]: I1128 19:02:19.913068 4909 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 19:02:19 crc kubenswrapper[4909]: I1128 19:02:19.927763 4909 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7"
Nov 28 19:02:19 crc kubenswrapper[4909]: I1128 19:02:19.940094 4909 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"} pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 19:02:19 crc kubenswrapper[4909]: I1128 19:02:19.940242 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerName="machine-config-daemon" containerID="cri-o://04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708" gracePeriod=600
Nov 28 19:02:20 crc kubenswrapper[4909]: E1128 19:02:20.080458 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:02:20 crc kubenswrapper[4909]: I1128 19:02:20.689322 4909 generic.go:334] "Generic (PLEG): container finished" podID="5f0ac931-d37b-4342-8c12-c2779b455cc5" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708" exitCode=0
Nov 28 19:02:20 crc kubenswrapper[4909]: I1128 19:02:20.689398 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" event={"ID":"5f0ac931-d37b-4342-8c12-c2779b455cc5","Type":"ContainerDied","Data":"04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"}
Nov 28 19:02:20 crc kubenswrapper[4909]: I1128 19:02:20.689715 4909 scope.go:117] "RemoveContainer" containerID="6b21cd92c3dfe12571ba7526889fb2493e0be87bb6225f8ee39c471668ab1d1e"
Nov 28 19:02:20 crc kubenswrapper[4909]: I1128 19:02:20.690878 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:02:20 crc kubenswrapper[4909]: E1128 19:02:20.691334 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.631956 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:23 crc kubenswrapper[4909]: E1128 19:02:23.633140 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f79fafa5-1017-41e5-ad6a-8d12a887e9e1" containerName="keystone-cron"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.633155 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="f79fafa5-1017-41e5-ad6a-8d12a887e9e1" containerName="keystone-cron"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.633435 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="f79fafa5-1017-41e5-ad6a-8d12a887e9e1" containerName="keystone-cron"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.635452 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.642612 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.792211 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.792376 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.792412 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxfss\" (UniqueName: \"kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.894554 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.894624 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxfss\" (UniqueName: \"kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.894729 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.895344 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.895439 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.926806 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxfss\" (UniqueName: \"kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss\") pod \"redhat-marketplace-qtjjg\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") " pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:23 crc kubenswrapper[4909]: I1128 19:02:23.980724 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:24 crc kubenswrapper[4909]: W1128 19:02:24.491865 4909 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b32e3fe_858c_4735_a248_b716b3cf9990.slice/crio-b761d2fbf617102d4203857634d8f3303d66536e0daca5850e01f9d2710b2325 WatchSource:0}: Error finding container b761d2fbf617102d4203857634d8f3303d66536e0daca5850e01f9d2710b2325: Status 404 returned error can't find the container with id b761d2fbf617102d4203857634d8f3303d66536e0daca5850e01f9d2710b2325
Nov 28 19:02:24 crc kubenswrapper[4909]: I1128 19:02:24.501954 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:24 crc kubenswrapper[4909]: I1128 19:02:24.736040 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerStarted","Data":"b761d2fbf617102d4203857634d8f3303d66536e0daca5850e01f9d2710b2325"}
Nov 28 19:02:25 crc kubenswrapper[4909]: I1128 19:02:25.748560 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerID="af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc" exitCode=0
Nov 28 19:02:25 crc kubenswrapper[4909]: I1128 19:02:25.748729 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerDied","Data":"af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc"}
Nov 28 19:02:27 crc kubenswrapper[4909]: I1128 19:02:27.779178 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerID="cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342" exitCode=0
Nov 28 19:02:27 crc kubenswrapper[4909]: I1128 19:02:27.779262 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerDied","Data":"cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342"}
Nov 28 19:02:27 crc kubenswrapper[4909]: I1128 19:02:27.783238 4909 generic.go:334] "Generic (PLEG): container finished" podID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerID="ffe1df1a67259d0edaba28ddf00bd1460a871ae4abb2601a22cb13893092fef3" exitCode=0
Nov 28 19:02:27 crc kubenswrapper[4909]: I1128 19:02:27.783326 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" event={"ID":"2c3a1077-9e2d-4c6a-8963-98e8a0662189","Type":"ContainerDied","Data":"ffe1df1a67259d0edaba28ddf00bd1460a871ae4abb2601a22cb13893092fef3"}
Nov 28 19:02:27 crc kubenswrapper[4909]: I1128 19:02:27.784250 4909 scope.go:117] "RemoveContainer" containerID="ffe1df1a67259d0edaba28ddf00bd1460a871ae4abb2601a22cb13893092fef3"
Nov 28 19:02:28 crc kubenswrapper[4909]: I1128 19:02:28.308773 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xw4tq_must-gather-9vc6g_2c3a1077-9e2d-4c6a-8963-98e8a0662189/gather/0.log"
Nov 28 19:02:28 crc kubenswrapper[4909]: I1128 19:02:28.807001 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerStarted","Data":"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"}
Nov 28 19:02:28 crc kubenswrapper[4909]: I1128 19:02:28.859844 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qtjjg" podStartSLOduration=3.23538173 podStartE2EDuration="5.859823403s" podCreationTimestamp="2025-11-28 19:02:23 +0000 UTC" firstStartedPulling="2025-11-28 19:02:25.752210753 +0000 UTC m=+10328.148895287" lastFinishedPulling="2025-11-28 19:02:28.376652386 +0000 UTC m=+10330.773336960" observedRunningTime="2025-11-28 19:02:28.832008117 +0000 UTC m=+10331.228692631" watchObservedRunningTime="2025-11-28 19:02:28.859823403 +0000 UTC m=+10331.256507937"
Nov 28 19:02:31 crc kubenswrapper[4909]: I1128 19:02:31.902075 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:02:31 crc kubenswrapper[4909]: E1128 19:02:31.902780 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:02:33 crc kubenswrapper[4909]: I1128 19:02:33.981738 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:33 crc kubenswrapper[4909]: I1128 19:02:33.983454 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:34 crc kubenswrapper[4909]: I1128 19:02:34.067045 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:34 crc kubenswrapper[4909]: I1128 19:02:34.961638 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:35 crc kubenswrapper[4909]: I1128 19:02:35.039350 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.744096 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xw4tq/must-gather-9vc6g"]
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.744896 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-xw4tq/must-gather-9vc6g" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="copy" containerID="cri-o://8343aacd3bcdc478c4b067ec67fbc7a57668b99929aec5071a8facf091585ec5" gracePeriod=2
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.760690 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xw4tq/must-gather-9vc6g"]
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.915807 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xw4tq_must-gather-9vc6g_2c3a1077-9e2d-4c6a-8963-98e8a0662189/copy/0.log"
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.916576 4909 generic.go:334] "Generic (PLEG): container finished" podID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerID="8343aacd3bcdc478c4b067ec67fbc7a57668b99929aec5071a8facf091585ec5" exitCode=143
Nov 28 19:02:36 crc kubenswrapper[4909]: I1128 19:02:36.916934 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qtjjg" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="registry-server" containerID="cri-o://796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe" gracePeriod=2
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.228850 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xw4tq_must-gather-9vc6g_2c3a1077-9e2d-4c6a-8963-98e8a0662189/copy/0.log"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.230045 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xw4tq/must-gather-9vc6g"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.255154 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output\") pod \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") "
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.255379 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klfpt\" (UniqueName: \"kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt\") pod \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\" (UID: \"2c3a1077-9e2d-4c6a-8963-98e8a0662189\") "
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.261720 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt" (OuterVolumeSpecName: "kube-api-access-klfpt") pod "2c3a1077-9e2d-4c6a-8963-98e8a0662189" (UID: "2c3a1077-9e2d-4c6a-8963-98e8a0662189"). InnerVolumeSpecName "kube-api-access-klfpt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.358228 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klfpt\" (UniqueName: \"kubernetes.io/projected/2c3a1077-9e2d-4c6a-8963-98e8a0662189-kube-api-access-klfpt\") on node \"crc\" DevicePath \"\""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.370420 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.439844 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "2c3a1077-9e2d-4c6a-8963-98e8a0662189" (UID: "2c3a1077-9e2d-4c6a-8963-98e8a0662189"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.459450 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxfss\" (UniqueName: \"kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss\") pod \"3b32e3fe-858c-4735-a248-b716b3cf9990\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") "
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.459540 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities\") pod \"3b32e3fe-858c-4735-a248-b716b3cf9990\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") "
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.459594 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content\") pod \"3b32e3fe-858c-4735-a248-b716b3cf9990\" (UID: \"3b32e3fe-858c-4735-a248-b716b3cf9990\") "
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.460084 4909 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2c3a1077-9e2d-4c6a-8963-98e8a0662189-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.460315 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities" (OuterVolumeSpecName: "utilities") pod "3b32e3fe-858c-4735-a248-b716b3cf9990" (UID: "3b32e3fe-858c-4735-a248-b716b3cf9990"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.462758 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss" (OuterVolumeSpecName: "kube-api-access-rxfss") pod "3b32e3fe-858c-4735-a248-b716b3cf9990" (UID: "3b32e3fe-858c-4735-a248-b716b3cf9990"). InnerVolumeSpecName "kube-api-access-rxfss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.479990 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b32e3fe-858c-4735-a248-b716b3cf9990" (UID: "3b32e3fe-858c-4735-a248-b716b3cf9990"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.561697 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.561730 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b32e3fe-858c-4735-a248-b716b3cf9990-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.561744 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxfss\" (UniqueName: \"kubernetes.io/projected/3b32e3fe-858c-4735-a248-b716b3cf9990-kube-api-access-rxfss\") on node \"crc\" DevicePath \"\""
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.924891 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" path="/var/lib/kubelet/pods/2c3a1077-9e2d-4c6a-8963-98e8a0662189/volumes"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.936754 4909 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xw4tq_must-gather-9vc6g_2c3a1077-9e2d-4c6a-8963-98e8a0662189/copy/0.log"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.937086 4909 scope.go:117] "RemoveContainer" containerID="8343aacd3bcdc478c4b067ec67fbc7a57668b99929aec5071a8facf091585ec5"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.937230 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xw4tq/must-gather-9vc6g"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.948923 4909 generic.go:334] "Generic (PLEG): container finished" podID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerID="796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe" exitCode=0
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.948973 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerDied","Data":"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"}
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.949009 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtjjg" event={"ID":"3b32e3fe-858c-4735-a248-b716b3cf9990","Type":"ContainerDied","Data":"b761d2fbf617102d4203857634d8f3303d66536e0daca5850e01f9d2710b2325"}
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.949108 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtjjg"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.992015 4909 scope.go:117] "RemoveContainer" containerID="ffe1df1a67259d0edaba28ddf00bd1460a871ae4abb2601a22cb13893092fef3"
Nov 28 19:02:37 crc kubenswrapper[4909]: I1128 19:02:37.993801 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.004699 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtjjg"]
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.070723 4909 scope.go:117] "RemoveContainer" containerID="796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.094557 4909 scope.go:117] "RemoveContainer" containerID="cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.112170 4909 scope.go:117] "RemoveContainer" containerID="af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.163264 4909 scope.go:117] "RemoveContainer" containerID="796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"
Nov 28 19:02:38 crc kubenswrapper[4909]: E1128 19:02:38.163709 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe\": container with ID starting with 796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe not found: ID does not exist" containerID="796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.163755 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe"} err="failed to get container status \"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe\": rpc error: code = NotFound desc = could not find container \"796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe\": container with ID starting with 796baf72dc3d1c8f0954ce6688c66c6dba55960a70d503726a9c63a37505d1fe not found: ID does not exist"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.163775 4909 scope.go:117] "RemoveContainer" containerID="cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342"
Nov 28 19:02:38 crc kubenswrapper[4909]: E1128 19:02:38.164807 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342\": container with ID starting with cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342 not found: ID does not exist" containerID="cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.164866 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342"} err="failed to get container status \"cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342\": rpc error: code = NotFound desc = could not find container \"cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342\": container with ID starting with cb117f8fbe3f05e0278a5c406d4e6bb90fba36da28f1cd48dccf42bba446f342 not found: ID does not exist"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.164899 4909 scope.go:117] "RemoveContainer" containerID="af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc"
Nov 28 19:02:38 crc kubenswrapper[4909]: E1128 19:02:38.165173 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc\": container with ID starting with af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc not found: ID does not exist" containerID="af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc"
Nov 28 19:02:38 crc kubenswrapper[4909]: I1128 19:02:38.165200 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc"} err="failed to get container status \"af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc\": rpc error: code = NotFound desc = could not find container \"af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc\": container with ID starting with af02cb83b6628b4d4f28ae126065236025343845ba25970431879f15182ea6dc not found: ID does not exist"
Nov 28 19:02:39 crc kubenswrapper[4909]: I1128 19:02:39.922223 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" path="/var/lib/kubelet/pods/3b32e3fe-858c-4735-a248-b716b3cf9990/volumes"
Nov 28 19:02:42 crc kubenswrapper[4909]: I1128 19:02:42.902797 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:02:42 crc kubenswrapper[4909]: E1128 19:02:42.904115 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:02:53 crc kubenswrapper[4909]: I1128 19:02:53.901797 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:02:53 crc kubenswrapper[4909]: E1128 19:02:53.902743 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:03:07 crc kubenswrapper[4909]: I1128 19:03:07.910439 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:03:07 crc kubenswrapper[4909]: E1128 19:03:07.911455 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:03:19 crc kubenswrapper[4909]: I1128 19:03:19.901906 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:03:19 crc kubenswrapper[4909]: E1128 19:03:19.902828 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:03:34 crc kubenswrapper[4909]: I1128 19:03:34.901464 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:03:34 crc kubenswrapper[4909]: E1128 19:03:34.902362 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.748972 4909 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"]
Nov 28 19:03:43 crc kubenswrapper[4909]: E1128 19:03:43.750337 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="extract-content"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750366 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="extract-content"
Nov 28 19:03:43 crc kubenswrapper[4909]: E1128 19:03:43.750398 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="gather"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750407 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="gather"
Nov 28 19:03:43 crc kubenswrapper[4909]: E1128 19:03:43.750429 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="copy"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750437 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="copy"
Nov 28 19:03:43 crc kubenswrapper[4909]: E1128 19:03:43.750455 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="registry-server"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750465 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="registry-server"
Nov 28 19:03:43 crc kubenswrapper[4909]: E1128 19:03:43.750482 4909 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="extract-utilities"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750491 4909 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="extract-utilities"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750812 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="gather"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750850 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3a1077-9e2d-4c6a-8963-98e8a0662189" containerName="copy"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.750884 4909 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b32e3fe-858c-4735-a248-b716b3cf9990" containerName="registry-server"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.753794 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.761378 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"]
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.832288 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp75f\" (UniqueName: \"kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.832507 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.832785 4909 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.934360 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.934518 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp75f\" (UniqueName: \"kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.934631 4909 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.934918 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.935410 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:43 crc kubenswrapper[4909]: I1128 19:03:43.958530 4909 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp75f\" (UniqueName: \"kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f\") pod \"redhat-operators-9sfn5\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") " pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:44 crc kubenswrapper[4909]: I1128 19:03:44.089187 4909 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:44 crc kubenswrapper[4909]: I1128 19:03:44.603252 4909 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"]
Nov 28 19:03:44 crc kubenswrapper[4909]: I1128 19:03:44.815256 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerStarted","Data":"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e"}
Nov 28 19:03:44 crc kubenswrapper[4909]: I1128 19:03:44.815521 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerStarted","Data":"a4253977da6b598d290288ee106a3472d9643ef58e029c9dc27d03688f2391e4"}
Nov 28 19:03:45 crc kubenswrapper[4909]: I1128 19:03:45.830378 4909 generic.go:334] "Generic (PLEG): container finished" podID="5a104166-c231-4b54-a68f-2683f53b3adf" containerID="a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e" exitCode=0
Nov 28 19:03:45 crc kubenswrapper[4909]: I1128 19:03:45.830649 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerDied","Data":"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e"}
Nov 28 19:03:46 crc kubenswrapper[4909]: I1128 19:03:46.902829 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:03:46 crc kubenswrapper[4909]: E1128 19:03:46.904431 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:03:47 crc kubenswrapper[4909]: I1128 19:03:47.895978 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerStarted","Data":"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1"}
Nov 28 19:03:48 crc kubenswrapper[4909]: I1128 19:03:48.913546 4909 generic.go:334] "Generic (PLEG): container finished" podID="5a104166-c231-4b54-a68f-2683f53b3adf" containerID="bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1" exitCode=0
Nov 28 19:03:48 crc kubenswrapper[4909]: I1128 19:03:48.913614 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerDied","Data":"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1"}
Nov 28 19:03:49 crc kubenswrapper[4909]: I1128 19:03:49.957278 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerStarted","Data":"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd"}
Nov 28 19:03:50 crc kubenswrapper[4909]: I1128 19:03:50.005221 4909 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9sfn5" podStartSLOduration=3.423189416 podStartE2EDuration="7.005196847s" podCreationTimestamp="2025-11-28 19:03:43 +0000 UTC" firstStartedPulling="2025-11-28 19:03:45.833316446 +0000 UTC m=+10408.230000970" lastFinishedPulling="2025-11-28 19:03:49.415323877 +0000 UTC m=+10411.812008401" observedRunningTime="2025-11-28 19:03:49.989521746 +0000 UTC m=+10412.386206280" watchObservedRunningTime="2025-11-28 19:03:50.005196847 +0000 UTC m=+10412.401881381"
Nov 28 19:03:54 crc kubenswrapper[4909]: I1128 19:03:54.090252 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:54 crc kubenswrapper[4909]: I1128 19:03:54.090740 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:03:55 crc kubenswrapper[4909]: I1128 19:03:55.171785 4909 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9sfn5" podUID="5a104166-c231-4b54-a68f-2683f53b3adf" containerName="registry-server" probeResult="failure" output=<
Nov 28 19:03:55 crc kubenswrapper[4909]: timeout: failed to connect service ":50051" within 1s
Nov 28 19:03:55 crc kubenswrapper[4909]: >
Nov 28 19:04:01 crc kubenswrapper[4909]: I1128 19:04:01.901899 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:04:01 crc kubenswrapper[4909]: E1128 19:04:01.902805 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:04:04 crc kubenswrapper[4909]: I1128 19:04:04.170394 4909 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:04:04 crc kubenswrapper[4909]: I1128 19:04:04.264326 4909 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:04:04 crc kubenswrapper[4909]: I1128 19:04:04.432034 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"]
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.209233 4909 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9sfn5" podUID="5a104166-c231-4b54-a68f-2683f53b3adf" containerName="registry-server" containerID="cri-o://e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd" gracePeriod=2
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.753958 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9sfn5"
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.801594 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content\") pod \"5a104166-c231-4b54-a68f-2683f53b3adf\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") "
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.801773 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp75f\" (UniqueName: \"kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f\") pod \"5a104166-c231-4b54-a68f-2683f53b3adf\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") "
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.801842 4909 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities\") pod \"5a104166-c231-4b54-a68f-2683f53b3adf\" (UID: \"5a104166-c231-4b54-a68f-2683f53b3adf\") "
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.803352 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities" (OuterVolumeSpecName: "utilities") pod "5a104166-c231-4b54-a68f-2683f53b3adf" (UID: "5a104166-c231-4b54-a68f-2683f53b3adf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.844061 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f" (OuterVolumeSpecName: "kube-api-access-cp75f") pod "5a104166-c231-4b54-a68f-2683f53b3adf" (UID: "5a104166-c231-4b54-a68f-2683f53b3adf"). InnerVolumeSpecName "kube-api-access-cp75f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.905218 4909 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp75f\" (UniqueName: \"kubernetes.io/projected/5a104166-c231-4b54-a68f-2683f53b3adf-kube-api-access-cp75f\") on node \"crc\" DevicePath \"\""
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.905284 4909 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 19:04:06 crc kubenswrapper[4909]: I1128 19:04:06.911265 4909 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a104166-c231-4b54-a68f-2683f53b3adf" (UID: "5a104166-c231-4b54-a68f-2683f53b3adf"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.007012 4909 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a104166-c231-4b54-a68f-2683f53b3adf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.227060 4909 generic.go:334] "Generic (PLEG): container finished" podID="5a104166-c231-4b54-a68f-2683f53b3adf" containerID="e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd" exitCode=0 Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.227112 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerDied","Data":"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd"} Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.227185 4909 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9sfn5" event={"ID":"5a104166-c231-4b54-a68f-2683f53b3adf","Type":"ContainerDied","Data":"a4253977da6b598d290288ee106a3472d9643ef58e029c9dc27d03688f2391e4"} Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.227141 4909 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9sfn5" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.227221 4909 scope.go:117] "RemoveContainer" containerID="e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.274564 4909 scope.go:117] "RemoveContainer" containerID="bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.295487 4909 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"] Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.309260 4909 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9sfn5"] Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.327146 4909 scope.go:117] "RemoveContainer" containerID="a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.400221 4909 scope.go:117] "RemoveContainer" containerID="e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd" Nov 28 19:04:07 crc kubenswrapper[4909]: E1128 19:04:07.401154 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd\": container with ID starting with e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd not found: ID does not exist" containerID="e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.401214 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd"} err="failed to get container status \"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd\": rpc error: code = NotFound desc = could not find container \"e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd\": container with ID starting with e9c5379330ce631565bac70c7e0294a0cf4e4efb5e0a35b5f372521694d787dd not found: ID does not exist" Nov 28 19:04:07 crc 
kubenswrapper[4909]: I1128 19:04:07.401252 4909 scope.go:117] "RemoveContainer" containerID="bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1" Nov 28 19:04:07 crc kubenswrapper[4909]: E1128 19:04:07.401565 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1\": container with ID starting with bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1 not found: ID does not exist" containerID="bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.401611 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1"} err="failed to get container status \"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1\": rpc error: code = NotFound desc = could not find container \"bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1\": container with ID starting with bb3e5b5bb1b1bdafde5c96ad739d1107387a4220740cdc1b06eb559a919237c1 not found: ID does not exist" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.401628 4909 scope.go:117] "RemoveContainer" containerID="a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e" Nov 28 19:04:07 crc kubenswrapper[4909]: E1128 19:04:07.401963 4909 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e\": container with ID starting with a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e not found: ID does not exist" containerID="a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.402006 4909 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e"} err="failed to get container status \"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e\": rpc error: code = NotFound desc = could not find container \"a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e\": container with ID starting with a3e91ff736f7739363e8ff5e1d572a0703da4d8566349cd9ced21a8b4abd6f0e not found: ID does not exist" Nov 28 19:04:07 crc kubenswrapper[4909]: I1128 19:04:07.942473 4909 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a104166-c231-4b54-a68f-2683f53b3adf" path="/var/lib/kubelet/pods/5a104166-c231-4b54-a68f-2683f53b3adf/volumes" Nov 28 19:04:16 crc kubenswrapper[4909]: I1128 19:04:16.903401 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708" Nov 28 19:04:16 crc kubenswrapper[4909]: E1128 19:04:16.904726 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5" Nov 28 19:04:29 crc kubenswrapper[4909]: I1128 19:04:29.903382 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708" 
Nov 28 19:04:29 crc kubenswrapper[4909]: E1128 19:04:29.904368 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:04:44 crc kubenswrapper[4909]: I1128 19:04:44.903531 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:04:44 crc kubenswrapper[4909]: E1128 19:04:44.904621 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:04:55 crc kubenswrapper[4909]: I1128 19:04:55.920383 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:04:55 crc kubenswrapper[4909]: E1128 19:04:55.921488 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"
Nov 28 19:05:06 crc kubenswrapper[4909]: I1128 19:05:06.902041 4909 scope.go:117] "RemoveContainer" containerID="04fa792ff2261f49e0e779ccaab71213a7dfefcb1dc649e298924e82b6599708"
Nov 28 19:05:06 crc kubenswrapper[4909]: E1128 19:05:06.903278 4909 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-d5nd7_openshift-machine-config-operator(5f0ac931-d37b-4342-8c12-c2779b455cc5)\"" pod="openshift-machine-config-operator/machine-config-daemon-d5nd7" podUID="5f0ac931-d37b-4342-8c12-c2779b455cc5"